From c617445fec616bbc9e92013869d33d4b43294642 Mon Sep 17 00:00:00 2001 From: Paul Forman Date: Wed, 29 Jul 2015 15:44:02 -0600 Subject: [PATCH 001/100] Update AWS ASG termination policy code and tests The initial commit of AWS autoscaling group termination policy was unfinished. It only worked on "create", and so had a needless ForceNew that would rebuild autoscaling groups on any change. It also used a HashString set, so it didn't preserve ordering of multiple policies correctly. Added the "update" operation, and converted to a TypeList to preserve ordering. In addition, removing the policy or setting it to a null list will reset the policy to "Default", the standard AWS policy. Updated the acceptance tests to verify the update, but the null case is difficult to test. --- .../aws/resource_aws_autoscaling_group.go | 28 ++++++++++++++----- .../resource_aws_autoscaling_group_test.go | 9 ++++-- 2 files changed, 28 insertions(+), 9 deletions(-) diff --git a/builtin/providers/aws/resource_aws_autoscaling_group.go b/builtin/providers/aws/resource_aws_autoscaling_group.go index 88fa2561d..52aab5acd 100644 --- a/builtin/providers/aws/resource_aws_autoscaling_group.go +++ b/builtin/providers/aws/resource_aws_autoscaling_group.go @@ -112,12 +112,9 @@ func resourceAwsAutoscalingGroup() *schema.Resource { }, "termination_policies": &schema.Schema{ - Type: schema.TypeSet, + Type: schema.TypeList, Optional: true, - Computed: true, - ForceNew: true, Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, }, "tag": autoscalingTagsSchema(), @@ -169,9 +166,8 @@ func resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{}) autoScalingGroupOpts.VPCZoneIdentifier = expandVpcZoneIdentifiers(v.(*schema.Set).List()) } - if v, ok := d.GetOk("termination_policies"); ok && v.(*schema.Set).Len() > 0 { - autoScalingGroupOpts.TerminationPolicies = expandStringList( - v.(*schema.Set).List()) + if v, ok := d.GetOk("termination_policies"); ok && 
len(v.([]interface{})) > 0 { + autoScalingGroupOpts.TerminationPolicies = expandStringList(v.([]interface{})) } log.Printf("[DEBUG] AutoScaling Group create configuration: %#v", autoScalingGroupOpts) @@ -262,6 +258,24 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) } } + if d.HasChange("termination_policies") { + // If the termination policy is set to null, we need to explicitly set + // it back to "Default", or the API won't reset it for us. + // This means GetOk() will fail us on the zero check. + v := d.Get("termination_policies") + if len(v.([]interface{})) > 0 { + opts.TerminationPolicies = expandStringList(v.([]interface{})) + } else { + // Policies is a slice of string pointers, so build one. + // Maybe there's a better idiom for this? + log.Printf("[DEBUG] Explicitly setting null termination policy to 'Default'") + pol := "Default" + s := make([]*string, 1, 1) + s[0] = &pol + opts.TerminationPolicies = s + } + } + if err := setAutoscalingTags(conn, d); err != nil { return err } else { diff --git a/builtin/providers/aws/resource_aws_autoscaling_group_test.go b/builtin/providers/aws/resource_aws_autoscaling_group_test.go index 814a51bc7..1bc1cea88 100644 --- a/builtin/providers/aws/resource_aws_autoscaling_group_test.go +++ b/builtin/providers/aws/resource_aws_autoscaling_group_test.go @@ -45,7 +45,9 @@ func TestAccAWSAutoScalingGroup_basic(t *testing.T) { resource.TestCheckResourceAttr( "aws_autoscaling_group.bar", "force_delete", "true"), resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "termination_policies.912102603", "OldestInstance"), + "aws_autoscaling_group.bar", "termination_policies.0", "OldestInstance"), + resource.TestCheckResourceAttr( + "aws_autoscaling_group.bar", "termination_policies.1", "ClosestToNextInstanceHour"), ), }, @@ -56,6 +58,8 @@ func TestAccAWSAutoScalingGroup_basic(t *testing.T) { testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.new", &lc), 
resource.TestCheckResourceAttr( "aws_autoscaling_group.bar", "desired_capacity", "5"), + resource.TestCheckResourceAttr( + "aws_autoscaling_group.bar", "termination_policies.0", "ClosestToNextInstanceHour"), testLaunchConfigurationName("aws_autoscaling_group.bar", &lc), testAccCheckAutoscalingTags(&group.Tags, "Bar", map[string]interface{}{ "value": "bar-foo", @@ -359,7 +363,7 @@ resource "aws_autoscaling_group" "bar" { health_check_type = "ELB" desired_capacity = 4 force_delete = true - termination_policies = ["OldestInstance"] + termination_policies = ["OldestInstance","ClosestToNextInstanceHour"] launch_configuration = "${aws_launch_configuration.foobar.name}" @@ -391,6 +395,7 @@ resource "aws_autoscaling_group" "bar" { health_check_type = "ELB" desired_capacity = 5 force_delete = true + termination_policies = ["ClosestToNextInstanceHour"] launch_configuration = "${aws_launch_configuration.new.name}" From a29ee391eeeba31d8607928ab419ce914f664633 Mon Sep 17 00:00:00 2001 From: Christian Berendt Date: Fri, 21 Aug 2015 18:34:22 +0200 Subject: [PATCH 002/100] [Vagrantfile] upgrade all packages while provisioning --- Vagrantfile | 1 + 1 file changed, 1 insertion(+) diff --git a/Vagrantfile b/Vagrantfile index 5b2d70bcc..061c6316a 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -13,6 +13,7 @@ ARCH=`uname -m | sed 's|i686|386|' | sed 's|x86_64|amd64|'` # Install Prereq Packages sudo apt-get update +sudo apt-get upgrade -y sudo apt-get install -y build-essential curl git-core libpcre3-dev mercurial pkg-config zip # Install Go From 8411e5e5bd16b1247541d52d9781b1302cddf3c3 Mon Sep 17 00:00:00 2001 From: Christian Berendt Date: Fri, 21 Aug 2015 19:54:16 +0200 Subject: [PATCH 003/100] [Vagrantfile] set resources for the provider 'virtualbox' The default resources (384 MByte memory and 1 VCPU) of the used box are not sufficient to create binaries for testing Terraform locally. 
--- Vagrantfile | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Vagrantfile b/Vagrantfile index 5b2d70bcc..be5eefddf 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -53,4 +53,9 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| v.vmx["numvcpus"] = "2" end end + + config.vm.provider "virtualbox" do |v| + v.memory = 4096 + v.cpus = 2 + end end From 2e51915431c74d59e6cdb3fd8eccd156bdc38353 Mon Sep 17 00:00:00 2001 From: Sharif Nassar Date: Mon, 31 Aug 2015 15:37:09 -0700 Subject: [PATCH 004/100] Colorize the 'forces new resource' message. Sometimes in all the output from ```terraform plan```, it is difficult to see the ```(forces new resource)``` message. This patch adds a little bit of color. --- command/format_plan.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/command/format_plan.go b/command/format_plan.go index 66df5f8c2..daf3f60aa 100644 --- a/command/format_plan.go +++ b/command/format_plan.go @@ -131,7 +131,7 @@ func formatPlanModuleExpand( newResource := "" if attrDiff.RequiresNew && rdiff.Destroy { - newResource = " (forces new resource)" + newResource = opts.Color.Color(" [red](forces new resource)") } buf.WriteString(fmt.Sprintf( From 32832ba030c1d2b53274c134b6125ab7cd37d653 Mon Sep 17 00:00:00 2001 From: Kevin Nuckolls Date: Tue, 15 Sep 2015 16:00:12 -0500 Subject: [PATCH 005/100] adds triggers to the null resource --- builtin/providers/null/resource.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/builtin/providers/null/resource.go b/builtin/providers/null/resource.go index 0badf346c..bd1e6f89c 100644 --- a/builtin/providers/null/resource.go +++ b/builtin/providers/null/resource.go @@ -19,7 +19,13 @@ func resource() *schema.Resource { Update: resourceUpdate, Delete: resourceDelete, - Schema: map[string]*schema.Schema{}, + Schema: map[string]*schema.Schema{ + "triggers": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + }, } } From 
3c6faf068f86534a47fefff998541db3bfa591e1 Mon Sep 17 00:00:00 2001 From: Jason Gedge Date: Thu, 23 Jul 2015 15:31:24 -0400 Subject: [PATCH 006/100] Allow setting the notification topic ARN for ElastiCache clusters. --- .../aws/resource_aws_elasticache_cluster.go | 24 ++++++++++++++++++- .../aws/r/elasticache_cluster.html.markdown | 4 ++++ 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster.go b/builtin/providers/aws/resource_aws_elasticache_cluster.go index 093ea88f8..968a5c9cf 100644 --- a/builtin/providers/aws/resource_aws_elasticache_cluster.go +++ b/builtin/providers/aws/resource_aws_elasticache_cluster.go @@ -112,7 +112,10 @@ func resourceAwsElasticacheCluster() *schema.Resource { }, }, }, - + "notification_topic_arn": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, // A single-element string list containing an Amazon Resource Name (ARN) that // uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot // file will be used to populate the node group. 
@@ -182,6 +185,10 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{ req.PreferredMaintenanceWindow = aws.String(v.(string)) } + if v, ok := d.GetOk("notification_topic_arn"); ok { + req.NotificationTopicArn = aws.String(v.(string)) + } + snaps := d.Get("snapshot_arns").(*schema.Set).List() if len(snaps) > 0 { s := expandStringList(snaps) @@ -244,6 +251,11 @@ func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{}) d.Set("security_group_ids", c.SecurityGroups) d.Set("parameter_group_name", c.CacheParameterGroup) d.Set("maintenance_window", c.PreferredMaintenanceWindow) + if c.NotificationConfiguration != nil { + if *c.NotificationConfiguration.TopicStatus == "active" { + d.Set("notification_topic_arn", c.NotificationConfiguration.TopicArn) + } + } if err := setCacheNodeData(d, c); err != nil { return err @@ -307,6 +319,16 @@ func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{ requestUpdate = true } + if d.HasChange("notification_topic_arn") { + v := d.Get("notification_topic_arn").(string) + req.NotificationTopicArn = aws.String(v) + if v == "" { + inactive := "inactive" + req.NotificationTopicStatus = &inactive + } + requestUpdate = true + } + if d.HasChange("engine_version") { req.EngineVersion = aws.String(d.Get("engine_version").(string)) requestUpdate = true diff --git a/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown b/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown index 2fe6a8dcf..d2cec07e8 100644 --- a/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown +++ b/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown @@ -73,6 +73,10 @@ names to associate with this cache cluster Amazon Resource Name (ARN) of a Redis RDB snapshot file stored in Amazon S3. 
Example: `arn:aws:s3:::my_bucket/snapshot1.rdb` +* `notification_topic_arn` – (Optional) An Amazon Resource Name (ARN) of an +SNS topic to send ElastiCache notifications to. Example: +`arn:aws:sns:us-east-1:012345678999:my_sns_topic` + * `tags` - (Optional) A mapping of tags to assign to the resource. From 0bcf557198a2d3997626d7f0cfd7f6bb6d57cf99 Mon Sep 17 00:00:00 2001 From: Antoine Grondin Date: Sun, 27 Sep 2015 00:06:51 -0400 Subject: [PATCH 007/100] use official Go client for DigitalOcean provider --- builtin/providers/digitalocean/config.go | 15 +- .../resource_digitalocean_domain.go | 21 +-- .../resource_digitalocean_domain_test.go | 20 +-- .../resource_digitalocean_droplet.go | 138 ++++++++++++------ .../resource_digitalocean_droplet_test.go | 84 ++++++----- .../resource_digitalocean_record.go | 86 +++++++---- .../resource_digitalocean_record_test.go | 44 ++++-- .../resource_digitalocean_ssh_key.go | 45 ++++-- .../resource_digitalocean_ssh_key_test.go | 31 ++-- 9 files changed, 311 insertions(+), 173 deletions(-) diff --git a/builtin/providers/digitalocean/config.go b/builtin/providers/digitalocean/config.go index c9a43bc09..498bf790b 100644 --- a/builtin/providers/digitalocean/config.go +++ b/builtin/providers/digitalocean/config.go @@ -3,7 +3,8 @@ package digitalocean import ( "log" - "github.com/pearkes/digitalocean" + "github.com/digitalocean/godo" + "golang.org/x/oauth2" ) type Config struct { @@ -11,14 +12,14 @@ type Config struct { } // Client() returns a new client for accessing digital ocean. 
-func (c *Config) Client() (*digitalocean.Client, error) { - client, err := digitalocean.NewClient(c.Token) +func (c *Config) Client() (*godo.Client, error) { + tokenSrc := oauth2.StaticTokenSource(&oauth2.Token{ + AccessToken: c.Token, + }) - log.Printf("[INFO] DigitalOcean Client configured for URL: %s", client.URL) + client := godo.NewClient(oauth2.NewClient(oauth2.NoContext, tokenSrc)) - if err != nil { - return nil, err - } + log.Printf("[INFO] DigitalOcean Client configured for URL: %s", client.BaseURL.String()) return client, nil } diff --git a/builtin/providers/digitalocean/resource_digitalocean_domain.go b/builtin/providers/digitalocean/resource_digitalocean_domain.go index 8ab5f1884..d7c4edca1 100644 --- a/builtin/providers/digitalocean/resource_digitalocean_domain.go +++ b/builtin/providers/digitalocean/resource_digitalocean_domain.go @@ -5,8 +5,8 @@ import ( "log" "strings" + "github.com/digitalocean/godo" "github.com/hashicorp/terraform/helper/schema" - "github.com/pearkes/digitalocean" ) func resourceDigitalOceanDomain() *schema.Resource { @@ -32,30 +32,31 @@ func resourceDigitalOceanDomain() *schema.Resource { } func resourceDigitalOceanDomainCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*digitalocean.Client) + client := meta.(*godo.Client) // Build up our creation options - opts := &digitalocean.CreateDomain{ + + opts := &godo.DomainCreateRequest{ Name: d.Get("name").(string), IPAddress: d.Get("ip_address").(string), } log.Printf("[DEBUG] Domain create configuration: %#v", opts) - name, err := client.CreateDomain(opts) + domain, _, err := client.Domains.Create(opts) if err != nil { return fmt.Errorf("Error creating Domain: %s", err) } - d.SetId(name) - log.Printf("[INFO] Domain Name: %s", name) + d.SetId(domain.Name) + log.Printf("[INFO] Domain Name: %s", domain.Name) return resourceDigitalOceanDomainRead(d, meta) } func resourceDigitalOceanDomainRead(d *schema.ResourceData, meta interface{}) error { - client := 
meta.(*digitalocean.Client) + client := meta.(*godo.Client) - domain, err := client.RetrieveDomain(d.Id()) + domain, _, err := client.Domains.Get(d.Id()) if err != nil { // If the domain is somehow already destroyed, mark as // successfully gone @@ -73,10 +74,10 @@ func resourceDigitalOceanDomainRead(d *schema.ResourceData, meta interface{}) er } func resourceDigitalOceanDomainDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*digitalocean.Client) + client := meta.(*godo.Client) log.Printf("[INFO] Deleting Domain: %s", d.Id()) - err := client.DestroyDomain(d.Id()) + _, err := client.Domains.Delete(d.Id()) if err != nil { return fmt.Errorf("Error deleting Domain: %s", err) } diff --git a/builtin/providers/digitalocean/resource_digitalocean_domain_test.go b/builtin/providers/digitalocean/resource_digitalocean_domain_test.go index 918eea155..2801414ee 100644 --- a/builtin/providers/digitalocean/resource_digitalocean_domain_test.go +++ b/builtin/providers/digitalocean/resource_digitalocean_domain_test.go @@ -4,13 +4,13 @@ import ( "fmt" "testing" + "github.com/digitalocean/godo" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" - "github.com/pearkes/digitalocean" ) func TestAccDigitalOceanDomain_Basic(t *testing.T) { - var domain digitalocean.Domain + var domain godo.Domain resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -33,7 +33,7 @@ func TestAccDigitalOceanDomain_Basic(t *testing.T) { } func testAccCheckDigitalOceanDomainDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*digitalocean.Client) + client := testAccProvider.Meta().(*godo.Client) for _, rs := range s.RootModule().Resources { if rs.Type != "digitalocean_domain" { @@ -41,17 +41,17 @@ func testAccCheckDigitalOceanDomainDestroy(s *terraform.State) error { } // Try to find the domain - _, err := client.RetrieveDomain(rs.Primary.ID) + _, _, err := client.Domains.Get(rs.Primary.ID) if err == 
nil { - fmt.Errorf("Domain still exists") + return fmt.Errorf("Domain still exists") } } return nil } -func testAccCheckDigitalOceanDomainAttributes(domain *digitalocean.Domain) resource.TestCheckFunc { +func testAccCheckDigitalOceanDomainAttributes(domain *godo.Domain) resource.TestCheckFunc { return func(s *terraform.State) error { if domain.Name != "foobar-test-terraform.com" { @@ -62,7 +62,7 @@ func testAccCheckDigitalOceanDomainAttributes(domain *digitalocean.Domain) resou } } -func testAccCheckDigitalOceanDomainExists(n string, domain *digitalocean.Domain) resource.TestCheckFunc { +func testAccCheckDigitalOceanDomainExists(n string, domain *godo.Domain) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -74,9 +74,9 @@ func testAccCheckDigitalOceanDomainExists(n string, domain *digitalocean.Domain) return fmt.Errorf("No Record ID is set") } - client := testAccProvider.Meta().(*digitalocean.Client) + client := testAccProvider.Meta().(*godo.Client) - foundDomain, err := client.RetrieveDomain(rs.Primary.ID) + foundDomain, _, err := client.Domains.Get(rs.Primary.ID) if err != nil { return err @@ -86,7 +86,7 @@ func testAccCheckDigitalOceanDomainExists(n string, domain *digitalocean.Domain) return fmt.Errorf("Record not found") } - *domain = foundDomain + *domain = *foundDomain return nil } diff --git a/builtin/providers/digitalocean/resource_digitalocean_droplet.go b/builtin/providers/digitalocean/resource_digitalocean_droplet.go index 88c0c6d07..eb4a195ea 100644 --- a/builtin/providers/digitalocean/resource_digitalocean_droplet.go +++ b/builtin/providers/digitalocean/resource_digitalocean_droplet.go @@ -3,12 +3,13 @@ package digitalocean import ( "fmt" "log" + "strconv" "strings" "time" + "github.com/digitalocean/godo" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" - "github.com/pearkes/digitalocean" ) func resourceDigitalOceanDroplet() *schema.Resource { @@ 
-101,11 +102,13 @@ func resourceDigitalOceanDroplet() *schema.Resource { } func resourceDigitalOceanDropletCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*digitalocean.Client) + client := meta.(*godo.Client) // Build up our creation options - opts := &digitalocean.CreateDroplet{ - Image: d.Get("image").(string), + opts := &godo.DropletCreateRequest{ + Image: godo.DropletCreateImage{ + Slug: d.Get("image").(string), + }, Name: d.Get("name").(string), Region: d.Get("region").(string), Size: d.Get("size").(string), @@ -116,7 +119,7 @@ func resourceDigitalOceanDropletCreate(d *schema.ResourceData, meta interface{}) } if attr, ok := d.GetOk("ipv6"); ok { - opts.IPV6 = attr.(bool) + opts.IPv6 = attr.(bool) } if attr, ok := d.GetOk("private_networking"); ok { @@ -128,25 +131,32 @@ func resourceDigitalOceanDropletCreate(d *schema.ResourceData, meta interface{}) } // Get configured ssh_keys - ssh_keys := d.Get("ssh_keys.#").(int) - if ssh_keys > 0 { - opts.SSHKeys = make([]string, 0, ssh_keys) - for i := 0; i < ssh_keys; i++ { + sshKeys := d.Get("ssh_keys.#").(int) + if sshKeys > 0 { + opts.SSHKeys = make([]godo.DropletCreateSSHKey, 0, sshKeys) + for i := 0; i < sshKeys; i++ { key := fmt.Sprintf("ssh_keys.%d", i) - opts.SSHKeys = append(opts.SSHKeys, d.Get(key).(string)) + id, err := strconv.Atoi(d.Get(key).(string)) + if err != nil { + return err + } + + opts.SSHKeys = append(opts.SSHKeys, godo.DropletCreateSSHKey{ + ID: id, + }) } } log.Printf("[DEBUG] Droplet create configuration: %#v", opts) - id, err := client.CreateDroplet(opts) + droplet, _, err := client.Droplets.Create(opts) if err != nil { return fmt.Errorf("Error creating droplet: %s", err) } // Assign the droplets id - d.SetId(id) + d.SetId(strconv.Itoa(droplet.ID)) log.Printf("[INFO] Droplet ID: %s", d.Id()) @@ -160,10 +170,15 @@ func resourceDigitalOceanDropletCreate(d *schema.ResourceData, meta interface{}) } func resourceDigitalOceanDropletRead(d *schema.ResourceData, meta 
interface{}) error { - client := meta.(*digitalocean.Client) + client := meta.(*godo.Client) + + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("invalid droplet id: %v", err) + } // Retrieve the droplet properties for updating the state - droplet, err := client.RetrieveDroplet(d.Id()) + droplet, _, err := client.Droplets.Get(id) if err != nil { // check if the droplet no longer exists. if err.Error() == "Error retrieving droplet: API Error: 404 Not Found" { @@ -174,48 +189,70 @@ func resourceDigitalOceanDropletRead(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error retrieving droplet: %s", err) } - if droplet.ImageSlug() != "" { - d.Set("image", droplet.ImageSlug()) + if droplet.Image.Slug != "" { + d.Set("image", droplet.Image.Slug) } else { - d.Set("image", droplet.ImageId()) + d.Set("image", droplet.Image.ID) } d.Set("name", droplet.Name) - d.Set("region", droplet.RegionSlug()) - d.Set("size", droplet.SizeSlug) + d.Set("region", droplet.Region.Slug) + d.Set("size", droplet.Size.Slug) d.Set("status", droplet.Status) - d.Set("locked", droplet.IsLocked()) + d.Set("locked", strconv.FormatBool(droplet.Locked)) - if droplet.IPV6Address("public") != "" { + if publicIPv6 := findIPv6AddrByType(droplet, "public"); publicIPv6 != "" { d.Set("ipv6", true) - d.Set("ipv6_address", droplet.IPV6Address("public")) - d.Set("ipv6_address_private", droplet.IPV6Address("private")) + d.Set("ipv6_address", publicIPv6) + d.Set("ipv6_address_private", findIPv6AddrByType(droplet, "private")) } - d.Set("ipv4_address", droplet.IPV4Address("public")) + d.Set("ipv4_address", findIPv4AddrByType(droplet, "public")) - if droplet.NetworkingType() == "private" { + if privateIPv4 := findIPv4AddrByType(droplet, "private"); privateIPv4 != "" { d.Set("private_networking", true) - d.Set("ipv4_address_private", droplet.IPV4Address("private")) + d.Set("ipv4_address_private", privateIPv4) } // Initialize the connection info d.SetConnInfo(map[string]string{ "type": 
"ssh", - "host": droplet.IPV4Address("public"), + "host": findIPv4AddrByType(droplet, "public"), }) return nil } +func findIPv6AddrByType(d *godo.Droplet, addrType string) string { + for _, addr := range d.Networks.V6 { + if addr.Type == addrType { + return addr.IPAddress + } + } + return "" +} + +func findIPv4AddrByType(d *godo.Droplet, addrType string) string { + for _, addr := range d.Networks.V4 { + if addr.Type == addrType { + return addr.IPAddress + } + } + return "" +} + func resourceDigitalOceanDropletUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*digitalocean.Client) + client := meta.(*godo.Client) + + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("invalid droplet id: %v", err) + } if d.HasChange("size") { oldSize, newSize := d.GetChange("size") - err := client.PowerOff(d.Id()) - + _, _, err = client.DropletActions.PowerOff(id) if err != nil && !strings.Contains(err.Error(), "Droplet is already powered off") { return fmt.Errorf( "Error powering off droplet (%s): %s", d.Id(), err) @@ -229,7 +266,7 @@ func resourceDigitalOceanDropletUpdate(d *schema.ResourceData, meta interface{}) } // Resize the droplet - err = client.Resize(d.Id(), newSize.(string)) + _, _, err = client.DropletActions.Resize(id, newSize.(string), true) if err != nil { newErr := powerOnAndWait(d, meta) if newErr != nil { @@ -254,7 +291,7 @@ func resourceDigitalOceanDropletUpdate(d *schema.ResourceData, meta interface{}) "Error waiting for resize droplet (%s) to finish: %s", d.Id(), err) } - err = client.PowerOn(d.Id()) + _, _, err = client.DropletActions.PowerOn(id) if err != nil { return fmt.Errorf( @@ -272,7 +309,7 @@ func resourceDigitalOceanDropletUpdate(d *schema.ResourceData, meta interface{}) oldName, newName := d.GetChange("name") // Rename the droplet - err := client.Rename(d.Id(), newName.(string)) + _, _, err = client.DropletActions.Rename(id, newName.(string)) if err != nil { return fmt.Errorf( @@ -292,7 +329,7 @@ func 
resourceDigitalOceanDropletUpdate(d *schema.ResourceData, meta interface{}) // As there is no way to disable private networking, // we only check if it needs to be enabled if d.HasChange("private_networking") && d.Get("private_networking").(bool) { - err := client.EnablePrivateNetworking(d.Id()) + _, _, err = client.DropletActions.EnablePrivateNetworking(id) if err != nil { return fmt.Errorf( @@ -309,7 +346,7 @@ func resourceDigitalOceanDropletUpdate(d *schema.ResourceData, meta interface{}) // As there is no way to disable IPv6, we only check if it needs to be enabled if d.HasChange("ipv6") && d.Get("ipv6").(bool) { - err := client.EnableIPV6s(d.Id()) + _, _, err = client.DropletActions.EnableIPv6(id) if err != nil { return fmt.Errorf( @@ -330,9 +367,14 @@ func resourceDigitalOceanDropletUpdate(d *schema.ResourceData, meta interface{}) } func resourceDigitalOceanDropletDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*digitalocean.Client) + client := meta.(*godo.Client) - _, err := WaitForDropletAttribute( + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("invalid droplet id: %v", err) + } + + _, err = WaitForDropletAttribute( d, "false", []string{"", "true"}, "locked", meta) if err != nil { @@ -343,7 +385,7 @@ func resourceDigitalOceanDropletDelete(d *schema.ResourceData, meta interface{}) log.Printf("[INFO] Deleting droplet: %s", d.Id()) // Destroy the droplet - err = client.DestroyDroplet(d.Id()) + _, err = client.Droplets.Delete(id) // Handle remotely destroyed droplets if err != nil && strings.Contains(err.Error(), "404 Not Found") { @@ -386,9 +428,14 @@ func WaitForDropletAttribute( // cleaner and more efficient func newDropletStateRefreshFunc( d *schema.ResourceData, attribute string, meta interface{}) resource.StateRefreshFunc { - client := meta.(*digitalocean.Client) + client := meta.(*godo.Client) return func() (interface{}, string, error) { - err := resourceDigitalOceanDropletRead(d, meta) + id, err := 
strconv.Atoi(d.Id()) + if err != nil { + return nil, "", err + } + + err = resourceDigitalOceanDropletRead(d, meta) if err != nil { return nil, "", err } @@ -404,7 +451,7 @@ func newDropletStateRefreshFunc( // See if we can access our attribute if attr, ok := d.GetOk(attribute); ok { // Retrieve the droplet properties - droplet, err := client.RetrieveDroplet(d.Id()) + droplet, _, err := client.Droplets.Get(id) if err != nil { return nil, "", fmt.Errorf("Error retrieving droplet: %s", err) } @@ -418,8 +465,13 @@ func newDropletStateRefreshFunc( // Powers on the droplet and waits for it to be active func powerOnAndWait(d *schema.ResourceData, meta interface{}) error { - client := meta.(*digitalocean.Client) - err := client.PowerOn(d.Id()) + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("invalid droplet id: %v", err) + } + + client := meta.(*godo.Client) + _, _, err = client.DropletActions.PowerOn(id) if err != nil { return err } diff --git a/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go b/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go index 587612e01..730718c3f 100644 --- a/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go +++ b/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go @@ -2,16 +2,17 @@ package digitalocean import ( "fmt" + "strconv" "strings" "testing" + "github.com/digitalocean/godo" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" - "github.com/pearkes/digitalocean" ) func TestAccDigitalOceanDroplet_Basic(t *testing.T) { - var droplet digitalocean.Droplet + var droplet godo.Droplet resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -40,7 +41,7 @@ func TestAccDigitalOceanDroplet_Basic(t *testing.T) { } func TestAccDigitalOceanDroplet_Update(t *testing.T) { - var droplet digitalocean.Droplet + var droplet godo.Droplet resource.Test(t, resource.TestCase{ PreCheck: func() { 
testAccPreCheck(t) }, @@ -71,7 +72,7 @@ func TestAccDigitalOceanDroplet_Update(t *testing.T) { } func TestAccDigitalOceanDroplet_PrivateNetworkingIpv6(t *testing.T) { - var droplet digitalocean.Droplet + var droplet godo.Droplet resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -94,15 +95,20 @@ func TestAccDigitalOceanDroplet_PrivateNetworkingIpv6(t *testing.T) { } func testAccCheckDigitalOceanDropletDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*digitalocean.Client) + client := testAccProvider.Meta().(*godo.Client) for _, rs := range s.RootModule().Resources { if rs.Type != "digitalocean_droplet" { continue } + id, err := strconv.Atoi(rs.Primary.ID) + if err != nil { + return err + } + // Try to find the Droplet - _, err := client.RetrieveDroplet(rs.Primary.ID) + _, _, err = client.Droplets.Get(id) // Wait @@ -116,19 +122,19 @@ func testAccCheckDigitalOceanDropletDestroy(s *terraform.State) error { return nil } -func testAccCheckDigitalOceanDropletAttributes(droplet *digitalocean.Droplet) resource.TestCheckFunc { +func testAccCheckDigitalOceanDropletAttributes(droplet *godo.Droplet) resource.TestCheckFunc { return func(s *terraform.State) error { - if droplet.ImageSlug() != "centos-5-8-x32" { - return fmt.Errorf("Bad image_slug: %s", droplet.ImageSlug()) + if droplet.Image.Slug != "centos-5-8-x32" { + return fmt.Errorf("Bad image_slug: %s", droplet.Image.Slug) } - if droplet.SizeSlug != "512mb" { - return fmt.Errorf("Bad size_slug: %s", droplet.SizeSlug) + if droplet.Size.Slug != "512mb" { + return fmt.Errorf("Bad size_slug: %s", droplet.Size.Slug) } - if droplet.RegionSlug() != "nyc3" { - return fmt.Errorf("Bad region_slug: %s", droplet.RegionSlug()) + if droplet.Region.Slug != "nyc3" { + return fmt.Errorf("Bad region_slug: %s", droplet.Region.Slug) } if droplet.Name != "foo" { @@ -138,10 +144,10 @@ func testAccCheckDigitalOceanDropletAttributes(droplet *digitalocean.Droplet) re } } -func 
testAccCheckDigitalOceanDropletRenamedAndResized(droplet *digitalocean.Droplet) resource.TestCheckFunc { +func testAccCheckDigitalOceanDropletRenamedAndResized(droplet *godo.Droplet) resource.TestCheckFunc { return func(s *terraform.State) error { - if droplet.SizeSlug != "1gb" { + if droplet.Size.Slug != "1gb" { return fmt.Errorf("Bad size_slug: %s", droplet.SizeSlug) } @@ -153,50 +159,46 @@ func testAccCheckDigitalOceanDropletRenamedAndResized(droplet *digitalocean.Drop } } -func testAccCheckDigitalOceanDropletAttributes_PrivateNetworkingIpv6(droplet *digitalocean.Droplet) resource.TestCheckFunc { +func testAccCheckDigitalOceanDropletAttributes_PrivateNetworkingIpv6(droplet *godo.Droplet) resource.TestCheckFunc { return func(s *terraform.State) error { - if droplet.ImageSlug() != "centos-5-8-x32" { - return fmt.Errorf("Bad image_slug: %s", droplet.ImageSlug()) + if droplet.Image.Slug != "centos-5-8-x32" { + return fmt.Errorf("Bad image_slug: %s", droplet.Image.Slug) } - if droplet.SizeSlug != "1gb" { - return fmt.Errorf("Bad size_slug: %s", droplet.SizeSlug) + if droplet.Size.Slug != "1gb" { + return fmt.Errorf("Bad size_slug: %s", droplet.Size.Slug) } - if droplet.RegionSlug() != "sgp1" { - return fmt.Errorf("Bad region_slug: %s", droplet.RegionSlug()) + if droplet.Region.Slug != "sgp1" { + return fmt.Errorf("Bad region_slug: %s", droplet.Region.Slug) } if droplet.Name != "baz" { return fmt.Errorf("Bad name: %s", droplet.Name) } - if droplet.IPV4Address("private") == "" { - return fmt.Errorf("No ipv4 private: %s", droplet.IPV4Address("private")) + if findIPv4AddrByType(droplet, "private") == "" { + return fmt.Errorf("No ipv4 private: %s", findIPv4AddrByType(droplet, "private")) } // if droplet.IPV6Address("private") == "" { // return fmt.Errorf("No ipv6 private: %s", droplet.IPV6Address("private")) // } - if droplet.NetworkingType() != "private" { - return fmt.Errorf("Bad networking type: %s", droplet.NetworkingType()) + if findIPv4AddrByType(droplet, "public") 
== "" { + return fmt.Errorf("No ipv4 public: %s", findIPv4AddrByType(droplet, "public")) } - if droplet.IPV4Address("public") == "" { - return fmt.Errorf("No ipv4 public: %s", droplet.IPV4Address("public")) - } - - if droplet.IPV6Address("public") == "" { - return fmt.Errorf("No ipv6 public: %s", droplet.IPV6Address("public")) + if findIPv6AddrByType(droplet, "public") == "" { + return fmt.Errorf("No ipv6 public: %s", findIPv6AddrByType(droplet, "public")) } return nil } } -func testAccCheckDigitalOceanDropletExists(n string, droplet *digitalocean.Droplet) resource.TestCheckFunc { +func testAccCheckDigitalOceanDropletExists(n string, droplet *godo.Droplet) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -207,19 +209,25 @@ func testAccCheckDigitalOceanDropletExists(n string, droplet *digitalocean.Dropl return fmt.Errorf("No Droplet ID is set") } - client := testAccProvider.Meta().(*digitalocean.Client) + client := testAccProvider.Meta().(*godo.Client) - retrieveDroplet, err := client.RetrieveDroplet(rs.Primary.ID) + id, err := strconv.Atoi(rs.Primary.ID) + if err != nil { + return err + } + + // Try to find the Droplet + retrieveDroplet, _, err := client.Droplets.Get(id) if err != nil { return err } - if retrieveDroplet.StringId() != rs.Primary.ID { + if strconv.Itoa(retrieveDroplet.ID) != rs.Primary.ID { return fmt.Errorf("Droplet not found") } - *droplet = retrieveDroplet + *droplet = *retrieveDroplet return nil } @@ -230,7 +238,7 @@ func testAccCheckDigitalOceanDropletExists(n string, droplet *digitalocean.Dropl // other test already // //func Test_new_droplet_state_refresh_func(t *testing.T) { -// droplet := digitalocean.Droplet{ +// droplet := godo.Droplet{ // Name: "foobar", // } // resourceMap, _ := resource_digitalocean_droplet_update_state( diff --git a/builtin/providers/digitalocean/resource_digitalocean_record.go b/builtin/providers/digitalocean/resource_digitalocean_record.go index 
2ff095aae..ebcb2e0f8 100644 --- a/builtin/providers/digitalocean/resource_digitalocean_record.go +++ b/builtin/providers/digitalocean/resource_digitalocean_record.go @@ -3,10 +3,11 @@ package digitalocean import ( "fmt" "log" + "strconv" "strings" + "github.com/digitalocean/godo" "github.com/hashicorp/terraform/helper/schema" - "github.com/pearkes/digitalocean" ) func resourceDigitalOceanRecord() *schema.Resource { @@ -66,34 +67,55 @@ func resourceDigitalOceanRecord() *schema.Resource { } func resourceDigitalOceanRecordCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*digitalocean.Client) + client := meta.(*godo.Client) - newRecord := digitalocean.CreateRecord{ - Type: d.Get("type").(string), - Name: d.Get("name").(string), - Data: d.Get("value").(string), - Priority: d.Get("priority").(string), - Port: d.Get("port").(string), - Weight: d.Get("weight").(string), + newRecord := godo.DomainRecordEditRequest{ + Type: d.Get("type").(string), + Name: d.Get("name").(string), + Data: d.Get("value").(string), + } + + var err error + if priority := d.Get("priority").(string); priority != "" { + newRecord.Priority, err = strconv.Atoi(priority) + if err != nil { + return fmt.Errorf("Failed to parse priority as an integer: %v", err) + } + } + if port := d.Get("port").(string); port != "" { + newRecord.Port, err = strconv.Atoi(port) + if err != nil { + return fmt.Errorf("Failed to parse port as an integer: %v", err) + } + } + if weight := d.Get("weight").(string); weight != "" { + newRecord.Weight, err = strconv.Atoi(weight) + if err != nil { + return fmt.Errorf("Failed to parse weight as an integer: %v", err) + } } log.Printf("[DEBUG] record create configuration: %#v", newRecord) - recId, err := client.CreateRecord(d.Get("domain").(string), &newRecord) + rec, _, err := client.Domains.CreateRecord(d.Get("domain").(string), &newRecord) if err != nil { return fmt.Errorf("Failed to create record: %s", err) } - d.SetId(recId) + 
d.SetId(strconv.Itoa(rec.ID)) log.Printf("[INFO] Record ID: %s", d.Id()) return resourceDigitalOceanRecordRead(d, meta) } func resourceDigitalOceanRecordRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*digitalocean.Client) + client := meta.(*godo.Client) domain := d.Get("domain").(string) + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("invalid record ID: %v", err) + } - rec, err := client.RetrieveRecord(domain, d.Id()) + rec, _, err := client.Domains.Record(domain, id) if err != nil { // If the record is somehow already destroyed, mark as // successfully gone @@ -120,23 +142,29 @@ func resourceDigitalOceanRecordRead(d *schema.ResourceData, meta interface{}) er d.Set("name", rec.Name) d.Set("type", rec.Type) d.Set("value", rec.Data) - d.Set("weight", rec.StringWeight()) - d.Set("priority", rec.StringPriority()) - d.Set("port", rec.StringPort()) + d.Set("weight", strconv.Itoa(rec.Weight)) + d.Set("priority", strconv.Itoa(rec.Priority)) + d.Set("port", strconv.Itoa(rec.Port)) return nil } func resourceDigitalOceanRecordUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*digitalocean.Client) + client := meta.(*godo.Client) - var updateRecord digitalocean.UpdateRecord - if v, ok := d.GetOk("name"); ok { - updateRecord.Name = v.(string) + domain := d.Get("domain").(string) + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("invalid record ID: %v", err) } - log.Printf("[DEBUG] record update configuration: %#v", updateRecord) - err := client.UpdateRecord(d.Get("domain").(string), d.Id(), &updateRecord) + var editRecord godo.DomainRecordEditRequest + if v, ok := d.GetOk("name"); ok { + editRecord.Name = v.(string) + } + + log.Printf("[DEBUG] record update configuration: %#v", editRecord) + _, _, err = client.Domains.EditRecord(domain, id, &editRecord) if err != nil { return fmt.Errorf("Failed to update record: %s", err) } @@ -145,11 +173,17 @@ func 
resourceDigitalOceanRecordUpdate(d *schema.ResourceData, meta interface{}) } func resourceDigitalOceanRecordDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*digitalocean.Client) + client := meta.(*godo.Client) - log.Printf( - "[INFO] Deleting record: %s, %s", d.Get("domain").(string), d.Id()) - err := client.DestroyRecord(d.Get("domain").(string), d.Id()) + domain := d.Get("domain").(string) + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("invalid record ID: %v", err) + } + + log.Printf("[INFO] Deleting record: %s, %d", domain, id) + + _, err = client.Domains.DeleteRecord(domain, id) if err != nil { // If the record is somehow already destroyed, mark as // successfully gone diff --git a/builtin/providers/digitalocean/resource_digitalocean_record_test.go b/builtin/providers/digitalocean/resource_digitalocean_record_test.go index 139fd30b7..7811ee9c8 100644 --- a/builtin/providers/digitalocean/resource_digitalocean_record_test.go +++ b/builtin/providers/digitalocean/resource_digitalocean_record_test.go @@ -2,15 +2,16 @@ package digitalocean import ( "fmt" + "strconv" "testing" + "github.com/digitalocean/godo" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" - "github.com/pearkes/digitalocean" ) func TestAccDigitalOceanRecord_Basic(t *testing.T) { - var record digitalocean.Record + var record godo.DomainRecord resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -35,7 +36,7 @@ func TestAccDigitalOceanRecord_Basic(t *testing.T) { } func TestAccDigitalOceanRecord_Updated(t *testing.T) { - var record digitalocean.Record + var record godo.DomainRecord resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -77,7 +78,7 @@ func TestAccDigitalOceanRecord_Updated(t *testing.T) { } func TestAccDigitalOceanRecord_HostnameValue(t *testing.T) { - var record digitalocean.Record + var record godo.DomainRecord resource.Test(t, 
resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -104,7 +105,7 @@ func TestAccDigitalOceanRecord_HostnameValue(t *testing.T) { } func TestAccDigitalOceanRecord_RelativeHostnameValue(t *testing.T) { - var record digitalocean.Record + var record godo.DomainRecord resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -131,7 +132,7 @@ func TestAccDigitalOceanRecord_RelativeHostnameValue(t *testing.T) { } func TestAccDigitalOceanRecord_ExternalHostnameValue(t *testing.T) { - var record digitalocean.Record + var record godo.DomainRecord resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -158,14 +159,19 @@ func TestAccDigitalOceanRecord_ExternalHostnameValue(t *testing.T) { } func testAccCheckDigitalOceanRecordDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*digitalocean.Client) + client := testAccProvider.Meta().(*godo.Client) for _, rs := range s.RootModule().Resources { if rs.Type != "digitalocean_record" { continue } + domain := rs.Primary.Attributes["domain"] + id, err := strconv.Atoi(rs.Primary.ID) + if err != nil { + return err + } - _, err := client.RetrieveRecord(rs.Primary.Attributes["domain"], rs.Primary.ID) + _, _, err = client.Domains.Record(domain, id) if err == nil { return fmt.Errorf("Record still exists") @@ -175,7 +181,7 @@ func testAccCheckDigitalOceanRecordDestroy(s *terraform.State) error { return nil } -func testAccCheckDigitalOceanRecordAttributes(record *digitalocean.Record) resource.TestCheckFunc { +func testAccCheckDigitalOceanRecordAttributes(record *godo.DomainRecord) resource.TestCheckFunc { return func(s *terraform.State) error { if record.Data != "192.168.0.10" { @@ -186,7 +192,7 @@ func testAccCheckDigitalOceanRecordAttributes(record *digitalocean.Record) resou } } -func testAccCheckDigitalOceanRecordAttributesUpdated(record *digitalocean.Record) resource.TestCheckFunc { +func testAccCheckDigitalOceanRecordAttributesUpdated(record 
*godo.DomainRecord) resource.TestCheckFunc { return func(s *terraform.State) error { if record.Data != "192.168.0.11" { @@ -197,7 +203,7 @@ func testAccCheckDigitalOceanRecordAttributesUpdated(record *digitalocean.Record } } -func testAccCheckDigitalOceanRecordExists(n string, record *digitalocean.Record) resource.TestCheckFunc { +func testAccCheckDigitalOceanRecordExists(n string, record *godo.DomainRecord) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -209,25 +215,31 @@ func testAccCheckDigitalOceanRecordExists(n string, record *digitalocean.Record) return fmt.Errorf("No Record ID is set") } - client := testAccProvider.Meta().(*digitalocean.Client) + client := testAccProvider.Meta().(*godo.Client) - foundRecord, err := client.RetrieveRecord(rs.Primary.Attributes["domain"], rs.Primary.ID) + domain := rs.Primary.Attributes["domain"] + id, err := strconv.Atoi(rs.Primary.ID) + if err != nil { + return err + } + + foundRecord, _, err := client.Domains.Record(domain, id) if err != nil { return err } - if foundRecord.StringId() != rs.Primary.ID { + if strconv.Itoa(foundRecord.ID) != rs.Primary.ID { return fmt.Errorf("Record not found") } - *record = foundRecord + *record = *foundRecord return nil } } -func testAccCheckDigitalOceanRecordAttributesHostname(data string, record *digitalocean.Record) resource.TestCheckFunc { +func testAccCheckDigitalOceanRecordAttributesHostname(data string, record *godo.DomainRecord) resource.TestCheckFunc { return func(s *terraform.State) error { if record.Data != data { diff --git a/builtin/providers/digitalocean/resource_digitalocean_ssh_key.go b/builtin/providers/digitalocean/resource_digitalocean_ssh_key.go index 96a4ad80d..d6eb96f09 100644 --- a/builtin/providers/digitalocean/resource_digitalocean_ssh_key.go +++ b/builtin/providers/digitalocean/resource_digitalocean_ssh_key.go @@ -3,10 +3,11 @@ package digitalocean import ( "fmt" "log" + "strconv" "strings" + 
"github.com/digitalocean/godo" "github.com/hashicorp/terraform/helper/schema" - "github.com/pearkes/digitalocean" ) func resourceDigitalOceanSSHKey() *schema.Resource { @@ -42,30 +43,35 @@ func resourceDigitalOceanSSHKey() *schema.Resource { } func resourceDigitalOceanSSHKeyCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*digitalocean.Client) + client := meta.(*godo.Client) // Build up our creation options - opts := &digitalocean.CreateSSHKey{ + opts := &godo.KeyCreateRequest{ Name: d.Get("name").(string), PublicKey: d.Get("public_key").(string), } log.Printf("[DEBUG] SSH Key create configuration: %#v", opts) - id, err := client.CreateSSHKey(opts) + key, _, err := client.Keys.Create(opts) if err != nil { return fmt.Errorf("Error creating SSH Key: %s", err) } - d.SetId(id) - log.Printf("[INFO] SSH Key: %s", id) + d.SetId(strconv.Itoa(key.ID)) + log.Printf("[INFO] SSH Key: %d", key.ID) return resourceDigitalOceanSSHKeyRead(d, meta) } func resourceDigitalOceanSSHKeyRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*digitalocean.Client) + client := meta.(*godo.Client) - key, err := client.RetrieveSSHKey(d.Id()) + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("invalid SSH key id: %v", err) + } + + key, _, err := client.Keys.GetByID(id) if err != nil { // If the key is somehow already destroyed, mark as // successfully gone @@ -84,7 +90,12 @@ func resourceDigitalOceanSSHKeyRead(d *schema.ResourceData, meta interface{}) er } func resourceDigitalOceanSSHKeyUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*digitalocean.Client) + client := meta.(*godo.Client) + + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("invalid SSH key id: %v", err) + } var newName string if v, ok := d.GetOk("name"); ok { @@ -92,7 +103,10 @@ func resourceDigitalOceanSSHKeyUpdate(d *schema.ResourceData, meta interface{}) } log.Printf("[DEBUG] SSH key update name: %#v", newName) - 
err := client.RenameSSHKey(d.Id(), newName) + opts := &godo.KeyUpdateRequest{ + Name: newName, + } + _, _, err = client.Keys.UpdateByID(id, opts) if err != nil { return fmt.Errorf("Failed to update SSH key: %s", err) } @@ -101,10 +115,15 @@ func resourceDigitalOceanSSHKeyUpdate(d *schema.ResourceData, meta interface{}) } func resourceDigitalOceanSSHKeyDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*digitalocean.Client) + client := meta.(*godo.Client) - log.Printf("[INFO] Deleting SSH key: %s", d.Id()) - err := client.DestroySSHKey(d.Id()) + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("invalid SSH key id: %v", err) + } + + log.Printf("[INFO] Deleting SSH key: %d", id) + _, err = client.Keys.DeleteByID(id) if err != nil { return fmt.Errorf("Error deleting SSH key: %s", err) } diff --git a/builtin/providers/digitalocean/resource_digitalocean_ssh_key_test.go b/builtin/providers/digitalocean/resource_digitalocean_ssh_key_test.go index 009366e18..3aebe1821 100644 --- a/builtin/providers/digitalocean/resource_digitalocean_ssh_key_test.go +++ b/builtin/providers/digitalocean/resource_digitalocean_ssh_key_test.go @@ -6,13 +6,13 @@ import ( "strings" "testing" + "github.com/digitalocean/godo" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" - "github.com/pearkes/digitalocean" ) func TestAccDigitalOceanSSHKey_Basic(t *testing.T) { - var key digitalocean.SSHKey + var key godo.Key resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -35,15 +35,20 @@ func TestAccDigitalOceanSSHKey_Basic(t *testing.T) { } func testAccCheckDigitalOceanSSHKeyDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*digitalocean.Client) + client := testAccProvider.Meta().(*godo.Client) for _, rs := range s.RootModule().Resources { if rs.Type != "digitalocean_ssh_key" { continue } + id, err := strconv.Atoi(rs.Primary.ID) + if err != nil { + return err + } + // Try 
to find the key - _, err := client.RetrieveSSHKey(rs.Primary.ID) + _, _, err = client.Keys.GetByID(id) if err == nil { fmt.Errorf("SSH key still exists") @@ -53,7 +58,7 @@ func testAccCheckDigitalOceanSSHKeyDestroy(s *terraform.State) error { return nil } -func testAccCheckDigitalOceanSSHKeyAttributes(key *digitalocean.SSHKey) resource.TestCheckFunc { +func testAccCheckDigitalOceanSSHKeyAttributes(key *godo.Key) resource.TestCheckFunc { return func(s *terraform.State) error { if key.Name != "foobar" { @@ -64,7 +69,7 @@ func testAccCheckDigitalOceanSSHKeyAttributes(key *digitalocean.SSHKey) resource } } -func testAccCheckDigitalOceanSSHKeyExists(n string, key *digitalocean.SSHKey) resource.TestCheckFunc { +func testAccCheckDigitalOceanSSHKeyExists(n string, key *godo.Key) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -76,19 +81,25 @@ func testAccCheckDigitalOceanSSHKeyExists(n string, key *digitalocean.SSHKey) re return fmt.Errorf("No Record ID is set") } - client := testAccProvider.Meta().(*digitalocean.Client) + client := testAccProvider.Meta().(*godo.Client) - foundKey, err := client.RetrieveSSHKey(rs.Primary.ID) + id, err := strconv.Atoi(rs.Primary.ID) + if err != nil { + return err + } + + // Try to find the key + foundKey, _, err := client.Keys.GetByID(id) if err != nil { return err } - if strconv.Itoa(int(foundKey.Id)) != rs.Primary.ID { + if strconv.Itoa(foundKey.ID) != rs.Primary.ID { return fmt.Errorf("Record not found") } - *key = foundKey + *key = *foundKey return nil } From e0632de30c3582520444699da181989d88c5907b Mon Sep 17 00:00:00 2001 From: Kazunori Kojima Date: Fri, 2 Oct 2015 01:49:32 +0900 Subject: [PATCH 008/100] Add support S3 CORS --- .../providers/aws/resource_aws_s3_bucket.go | 117 ++++++++++++++++++ .../aws/resource_aws_s3_bucket_test.go | 62 ++++++++++ .../providers/aws/r/s3_bucket.html.markdown | 26 ++++ 3 files changed, 205 insertions(+) diff --git 
a/builtin/providers/aws/resource_aws_s3_bucket.go b/builtin/providers/aws/resource_aws_s3_bucket.go index a329d4ff6..93105ec51 100644 --- a/builtin/providers/aws/resource_aws_s3_bucket.go +++ b/builtin/providers/aws/resource_aws_s3_bucket.go @@ -41,6 +41,39 @@ func resourceAwsS3Bucket() *schema.Resource { StateFunc: normalizeJson, }, + "cors_rule": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allowed_headers": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "allowed_methods": &schema.Schema{ + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "allowed_origins": &schema.Schema{ + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "expose_headers": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "max_age_seconds": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + }, + }, + }, + "website": &schema.Schema{ Type: schema.TypeList, Optional: true, @@ -168,6 +201,12 @@ func resourceAwsS3BucketUpdate(d *schema.ResourceData, meta interface{}) error { } } + if d.HasChange("cors_rule") { + if err := resourceAwsS3BucketCorsUpdate(s3conn, d); err != nil { + return err + } + } + if d.HasChange("website") { if err := resourceAwsS3BucketWebsiteUpdate(s3conn, d); err != nil { return err @@ -221,6 +260,25 @@ func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error { } } + // Read the CORS + cors, err := s3conn.GetBucketCors(&s3.GetBucketCorsInput{ + Bucket: aws.String(d.Id()), + }) + log.Printf("[DEBUG] S3 bucket: %s, read CORS: %v", d.Id(), cors) + if err != nil { + rules := make([]map[string]interface{}, 0, len(cors.CORSRules)) + for _, ruleObject := range cors.CORSRules { + rule := make(map[string]interface{}) + rule["allowed_headers"] = 
ruleObject.AllowedHeaders + rule["allowed_methods"] = ruleObject.AllowedMethods + rule["allowed_origins"] = ruleObject.AllowedOrigins + rule["expose_headers"] = ruleObject.ExposeHeaders + rule["max_age_seconds"] = ruleObject.MaxAgeSeconds + rules = append(rules, rule) + } + d.Set("cors_rule", rules) + } + // Read the website configuration ws, err := s3conn.GetBucketWebsite(&s3.GetBucketWebsiteInput{ Bucket: aws.String(d.Id()), @@ -400,6 +458,65 @@ func resourceAwsS3BucketPolicyUpdate(s3conn *s3.S3, d *schema.ResourceData) erro return nil } +func resourceAwsS3BucketCorsUpdate(s3conn *s3.S3, d *schema.ResourceData) error { + bucket := d.Get("bucket").(string) + rawCors := d.Get("cors_rule").([]interface{}) + + if len(rawCors) == 0 { + // Delete CORS + log.Printf("[DEBUG] S3 bucket: %s, delete CORS", bucket) + _, err := s3conn.DeleteBucketCors(&s3.DeleteBucketCorsInput{ + Bucket: aws.String(bucket), + }) + if err != nil { + return fmt.Errorf("Error deleting S3 CORS: %s", err) + } + } else { + // Put CORS + rules := make([]*s3.CORSRule, 0, len(rawCors)) + for _, cors := range rawCors { + corsMap := cors.(map[string]interface{}) + r := &s3.CORSRule{} + for k, v := range corsMap { + log.Printf("[DEBUG] S3 bucket: %s, put CORS: %#v, %#v", bucket, k, v) + if k == "max_age_seconds" { + r.MaxAgeSeconds = aws.Int64(int64(v.(int))) + } else { + vMap := make([]*string, len(v.([]interface{}))) + for i, vv := range v.([]interface{}) { + str := vv.(string) + vMap[i] = aws.String(str) + } + switch k { + case "allowed_headers": + r.AllowedHeaders = vMap + case "allowed_methods": + r.AllowedMethods = vMap + case "allowed_origins": + r.AllowedOrigins = vMap + case "expose_headers": + r.ExposeHeaders = vMap + } + } + } + rules = append(rules, r) + } + corsInput := &s3.PutBucketCorsInput{ + Bucket: aws.String(bucket), + CORSConfiguration: &s3.CORSConfiguration{ + CORSRules: rules, + }, + } + log.Printf("[DEBUG] S3 bucket: %s, put CORS: %#v", bucket, corsInput) + _, err := 
s3conn.PutBucketCors(corsInput) + if err != nil { + return fmt.Errorf("Error putting S3 CORS: %s", err) + } + } + + return nil +} + func resourceAwsS3BucketWebsiteUpdate(s3conn *s3.S3, d *schema.ResourceData) error { ws := d.Get("website").([]interface{}) diff --git a/builtin/providers/aws/resource_aws_s3_bucket_test.go b/builtin/providers/aws/resource_aws_s3_bucket_test.go index e494816b3..4a969365a 100644 --- a/builtin/providers/aws/resource_aws_s3_bucket_test.go +++ b/builtin/providers/aws/resource_aws_s3_bucket_test.go @@ -188,6 +188,34 @@ func TestAccAWSS3Bucket_Versioning(t *testing.T) { }) } +func TestAccAWSS3Bucket_Cors(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3BucketDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSS3BucketConfigWithCORS, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExists("aws_s3_bucket.bucket"), + testAccCheckAWSS3BucketCors( + "aws_s3_bucket.bucket", + []*s3.CORSRule{ + &s3.CORSRule{ + AllowedHeaders: []*string{aws.String("*")}, + AllowedMethods: []*string{aws.String("PUT"), aws.String("POST")}, + AllowedOrigins: []*string{aws.String("https://www.example.com")}, + ExposeHeaders: []*string{aws.String("x-amz-server-side-encryption"), aws.String("ETag")}, + MaxAgeSeconds: aws.Int64(3000), + }, + }, + ), + ), + }, + }, + }) +} + func testAccCheckAWSS3BucketDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).s3conn @@ -370,6 +398,26 @@ func testAccCheckAWSS3BucketVersioning(n string, versioningStatus string) resour return nil } } +func testAccCheckAWSS3BucketCors(n string, corsRules []*s3.CORSRule) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, _ := s.RootModule().Resources[n] + conn := testAccProvider.Meta().(*AWSClient).s3conn + + out, err := conn.GetBucketCors(&s3.GetBucketCorsInput{ + Bucket: aws.String(rs.Primary.ID), 
+ }) + + if err != nil { + return fmt.Errorf("GetBucketCors error: %v", err) + } + + if !reflect.DeepEqual(out.CORSRules, corsRules) { + return fmt.Errorf("bad error cors rule, expected: %v, got %v", corsRules, out.CORSRules) + } + + return nil + } +} // These need a bit of randomness as the name can only be used once globally // within AWS @@ -452,3 +500,17 @@ resource "aws_s3_bucket" "bucket" { } } `, randInt) + +var testAccAWSS3BucketConfigWithCORS = fmt.Sprintf(` +resource "aws_s3_bucket" "bucket" { + bucket = "tf-test-bucket-%d" + acl = "public-read" + cors_rule { + allowed_headers = ["*"] + allowed_methods = ["PUT","POST"] + allowed_origins = ["https://www.example.com"] + expose_headers = ["x-amz-server-side-encryption","ETag"] + max_age_seconds = 3000 + } +} +`, randInt) diff --git a/website/source/docs/providers/aws/r/s3_bucket.html.markdown b/website/source/docs/providers/aws/r/s3_bucket.html.markdown index 011f73347..da008053c 100644 --- a/website/source/docs/providers/aws/r/s3_bucket.html.markdown +++ b/website/source/docs/providers/aws/r/s3_bucket.html.markdown @@ -41,6 +41,23 @@ resource "aws_s3_bucket" "b" { } ``` +### Using CORS + +``` +resource "aws_s3_bucket" "b" { + bucket = "s3-website-test.hashicorp.com" + acl = "public-read" + + cors_rule { + allowed_headers = ["*"] + allowed_methods = ["PUT","POST"] + allowed_origins = ["https://s3-website-test.hashicorp.com"] + expose_headers = ["ETag"] + max_age_seconds = 3000 + } +} +``` + ### Using versioning ``` @@ -64,6 +81,7 @@ The following arguments are supported: * `tags` - (Optional) A mapping of tags to assign to the bucket. * `force_destroy` - (Optional, Default:false ) A boolean that indicates all objects should be deleted from the bucket so that the bucket can be destroyed without error. These objects are *not* recoverable. * `website` - (Optional) A website object (documented below). 
+* `cors_rule` - (Optional) A rule of [Cross-Origin Resource Sharing](http://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) (documented below). * `versioning` - (Optional) A state of [versioning](http://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html) (documented below) The website object supports the following: @@ -72,6 +90,14 @@ The website object supports the following: * `error_document` - (Optional) An absolute path to the document to return in case of a 4XX error. * `redirect_all_requests_to` - (Optional) A hostname to redirect all website requests for this bucket to. +The CORS supports the following: + +* `allowed_headers` (Optional) Specifies which headers are allowed. +* `allowed_methods` (Required) Specifies which methods are allowed. Can be `GET`, `PUT`, `POST`, `DELETE` or `HEAD`. +* `allowed_origins` (Required) Specifies which origins are allowed. +* `expose_headers` (Optional) Specifies expose header in the response. +* `max_age_seconds` (Optional) Specifies time in seconds that browser can cache the response for a preflight request. + The versioning supports the following: * `enabled` - (Optional) Enable versioning. Once you version-enable a bucket, it can never return to an unversioned state. You can, however, suspend versioning on that bucket. 
From 61948f35d25076d19c14d02ccbc09c7c8fcdf0f8 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Mon, 6 Jul 2015 23:45:47 +0100 Subject: [PATCH 009/100] provider/aws: Add docs for aws_cloudformation_stack --- .../aws/r/cloudformation_stack.html.markdown | 63 +++++++++++++++++++ website/source/layouts/aws.erb | 8 +++ 2 files changed, 71 insertions(+) create mode 100644 website/source/docs/providers/aws/r/cloudformation_stack.html.markdown diff --git a/website/source/docs/providers/aws/r/cloudformation_stack.html.markdown b/website/source/docs/providers/aws/r/cloudformation_stack.html.markdown new file mode 100644 index 000000000..6a13520a2 --- /dev/null +++ b/website/source/docs/providers/aws/r/cloudformation_stack.html.markdown @@ -0,0 +1,63 @@ +--- +layout: "aws" +page_title: "AWS: aws_cloudformation_stack" +sidebar_current: "docs-aws-resource-cloudformation-stack" +description: |- + Provides a CloudFormation Stack resource. +--- + +# aws\_cloudformation\_stack + +Provides a CloudFormation Stack resource. 
+ +## Example Usage + +``` +resource "aws_cloudformation_stack" "network" { + name = "networking-stack" + template_body = <AWS Provider + > + CloudFormation Resources + + > CloudWatch Resources From 4dfbbe307490d28c2039ed7da8113818e82026f3 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Tue, 7 Jul 2015 08:00:05 +0100 Subject: [PATCH 010/100] provider/aws: Add implementation for aws_cloudformation_stack --- builtin/providers/aws/config.go | 5 + builtin/providers/aws/provider.go | 1 + .../aws/resource_aws_cloudformation_stack.go | 451 ++++++++++++++++++ builtin/providers/aws/structure.go | 55 +++ 4 files changed, 512 insertions(+) create mode 100644 builtin/providers/aws/resource_aws_cloudformation_stack.go diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go index f8f443b73..bbbad7eea 100644 --- a/builtin/providers/aws/config.go +++ b/builtin/providers/aws/config.go @@ -11,6 +11,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/aws/aws-sdk-go/service/cloudformation" "github.com/aws/aws-sdk-go/service/cloudwatch" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "github.com/aws/aws-sdk-go/service/directoryservice" @@ -47,6 +48,7 @@ type Config struct { } type AWSClient struct { + cfconn *cloudformation.CloudFormation cloudwatchconn *cloudwatch.CloudWatch cloudwatchlogsconn *cloudwatchlogs.CloudWatchLogs dsconn *directoryservice.DirectoryService @@ -175,6 +177,9 @@ func (c *Config) Client() (interface{}, error) { log.Println("[INFO] Initializing Lambda Connection") client.lambdaconn = lambda.New(awsConfig) + log.Println("[INFO] Initializing Cloudformation Connection") + client.cfconn = cloudformation.New(awsConfig) + log.Println("[INFO] Initializing CloudWatch SDK connection") client.cloudwatchconn = cloudwatch.New(awsConfig) diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index f73580d0f..547f9617a 
100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -163,6 +163,7 @@ func Provider() terraform.ResourceProvider { "aws_autoscaling_group": resourceAwsAutoscalingGroup(), "aws_autoscaling_notification": resourceAwsAutoscalingNotification(), "aws_autoscaling_policy": resourceAwsAutoscalingPolicy(), + "aws_cloudformation_stack": resourceAwsCloudFormationStack(), "aws_cloudwatch_log_group": resourceAwsCloudWatchLogGroup(), "aws_autoscaling_lifecycle_hook": resourceAwsAutoscalingLifecycleHook(), "aws_cloudwatch_metric_alarm": resourceAwsCloudWatchMetricAlarm(), diff --git a/builtin/providers/aws/resource_aws_cloudformation_stack.go b/builtin/providers/aws/resource_aws_cloudformation_stack.go new file mode 100644 index 000000000..1846a3105 --- /dev/null +++ b/builtin/providers/aws/resource_aws_cloudformation_stack.go @@ -0,0 +1,451 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/cloudformation" +) + +func resourceAwsCloudFormationStack() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsCloudFormationStackCreate, + Read: resourceAwsCloudFormationStackRead, + Update: resourceAwsCloudFormationStackUpdate, + Delete: resourceAwsCloudFormationStackDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "template_body": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + StateFunc: normalizeJson, + }, + "template_url": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "capabilities": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "disable_rollback": &schema.Schema{ + Type: 
schema.TypeBool, + Optional: true, + ForceNew: true, + }, + "notification_arns": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "on_failure": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "parameters": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + Computed: true, + }, + "outputs": &schema.Schema{ + Type: schema.TypeMap, + Computed: true, + }, + "policy_body": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + StateFunc: normalizeJson, + }, + "policy_url": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "timeout_in_minutes": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + "tags": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsCloudFormationStackCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cfconn + + input := cloudformation.CreateStackInput{ + StackName: aws.String(d.Get("name").(string)), + } + if v, ok := d.GetOk("template_body"); ok { + input.TemplateBody = aws.String(normalizeJson(v.(string))) + } + if v, ok := d.GetOk("template_url"); ok { + input.TemplateURL = aws.String(v.(string)) + } + if v, ok := d.GetOk("capabilities"); ok { + input.Capabilities = expandStringList(v.(*schema.Set).List()) + } + if v, ok := d.GetOk("disable_rollback"); ok { + input.DisableRollback = aws.Bool(v.(bool)) + } + if v, ok := d.GetOk("notification_arns"); ok { + input.NotificationARNs = expandStringList(v.(*schema.Set).List()) + } + if v, ok := d.GetOk("on_failure"); ok { + input.OnFailure = aws.String(v.(string)) + } + if v, ok := d.GetOk("parameters"); ok { + input.Parameters = expandCloudFormationParameters(v.(map[string]interface{})) + } + if v, ok := d.GetOk("policy_body"); ok { + input.StackPolicyBody = aws.String(normalizeJson(v.(string))) + } 
+ if v, ok := d.GetOk("policy_url"); ok { + input.StackPolicyURL = aws.String(v.(string)) + } + if v, ok := d.GetOk("tags"); ok { + input.Tags = expandCloudFormationTags(v.(map[string]interface{})) + } + if v, ok := d.GetOk("timeout_in_minutes"); ok { + input.TimeoutInMinutes = aws.Int64(int64(v.(int))) + } + + log.Printf("[DEBUG] Creating CloudFormation Stack: %s", input) + resp, err := conn.CreateStack(&input) + if err != nil { + return fmt.Errorf("Creating CloudFormation stack failed: %s", err.Error()) + } + + d.SetId(*resp.StackId) + + wait := resource.StateChangeConf{ + Pending: []string{"CREATE_IN_PROGRESS", "ROLLBACK_IN_PROGRESS", "ROLLBACK_COMPLETE"}, + Target: "CREATE_COMPLETE", + Timeout: 30 * time.Minute, + MinTimeout: 5 * time.Second, + Refresh: func() (interface{}, string, error) { + resp, err := conn.DescribeStacks(&cloudformation.DescribeStacksInput{ + StackName: aws.String(d.Get("name").(string)), + }) + status := *resp.Stacks[0].StackStatus + log.Printf("[DEBUG] Current CloudFormation stack status: %q", status) + + if status == "ROLLBACK_COMPLETE" { + stack := resp.Stacks[0] + failures, err := getCloudFormationFailures(stack.StackName, *stack.CreationTime, conn) + if err != nil { + return resp, "", fmt.Errorf( + "Failed getting details about rollback: %q", err.Error()) + } + + return resp, "", fmt.Errorf("ROLLBACK_COMPLETE:\n%q", failures) + } + return resp, status, err + }, + } + + _, err = wait.WaitForState() + if err != nil { + return err + } + + log.Printf("[INFO] CloudFormation Stack %q created", d.Get("name").(string)) + + return resourceAwsCloudFormationStackRead(d, meta) +} + +func resourceAwsCloudFormationStackRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cfconn + stackName := d.Get("name").(string) + + input := &cloudformation.DescribeStacksInput{ + StackName: aws.String(stackName), + } + resp, err := conn.DescribeStacks(input) + if err != nil { + return err + } + + stacks := resp.Stacks + if 
len(stacks) < 1 { + return nil + } + + tInput := cloudformation.GetTemplateInput{ + StackName: aws.String(stackName), + } + out, err := conn.GetTemplate(&tInput) + if err != nil { + return err + } + + d.Set("template_body", normalizeJson(*out.TemplateBody)) + + stack := stacks[0] + log.Printf("[DEBUG] Received CloudFormation stack: %s", stack) + + d.Set("name", stack.StackName) + d.Set("arn", stack.StackId) + + if stack.TimeoutInMinutes != nil { + d.Set("timeout_in_minutes", int(*stack.TimeoutInMinutes)) + } + if stack.Description != nil { + d.Set("description", stack.Description) + } + if stack.DisableRollback != nil { + d.Set("disable_rollback", stack.DisableRollback) + } + if len(stack.NotificationARNs) > 0 { + err = d.Set("notification_arns", schema.NewSet(schema.HashString, flattenStringList(stack.NotificationARNs))) + if err != nil { + return err + } + } + + originalParams := d.Get("parameters").(map[string]interface{}) + err = d.Set("parameters", flattenCloudFormationParameters(stack.Parameters, originalParams)) + if err != nil { + return err + } + + err = d.Set("tags", flattenCloudFormationTags(stack.Tags)) + if err != nil { + return err + } + + err = d.Set("outputs", flattenCloudFormationOutputs(stack.Outputs)) + if err != nil { + return err + } + + if len(stack.Capabilities) > 0 { + err = d.Set("capabilities", schema.NewSet(schema.HashString, flattenStringList(stack.Capabilities))) + if err != nil { + return err + } + } + + return nil +} + +func resourceAwsCloudFormationStackUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cfconn + + input := &cloudformation.UpdateStackInput{ + StackName: aws.String(d.Get("name").(string)), + } + + if d.HasChange("template_body") { + input.TemplateBody = aws.String(normalizeJson(d.Get("template_body").(string))) + } + if d.HasChange("template_url") { + input.TemplateURL = aws.String(d.Get("template_url").(string)) + } + if d.HasChange("capabilities") { + input.Capabilities = 
expandStringList(d.Get("capabilities").(*schema.Set).List()) + } + if d.HasChange("notification_arns") { + input.NotificationARNs = expandStringList(d.Get("notification_arns").(*schema.Set).List()) + } + if d.HasChange("parameters") { + input.Parameters = expandCloudFormationParameters(d.Get("parameters").(map[string]interface{})) + } + if d.HasChange("policy_body") { + input.StackPolicyBody = aws.String(normalizeJson(d.Get("policy_body").(string))) + } + if d.HasChange("policy_url") { + input.StackPolicyURL = aws.String(d.Get("policy_url").(string)) + } + + log.Printf("[DEBUG] Updating CloudFormation stack: %s", input) + stack, err := conn.UpdateStack(input) + if err != nil { + return err + } + + lastUpdatedTime, err := getLastCfEventTimestamp(d.Get("name").(string), conn) + if err != nil { + return err + } + + wait := resource.StateChangeConf{ + Pending: []string{ + "UPDATE_COMPLETE_CLEANUP_IN_PROGRESS", + "UPDATE_IN_PROGRESS", + "UPDATE_ROLLBACK_IN_PROGRESS", + "UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS", + "UPDATE_ROLLBACK_COMPLETE", + }, + Target: "UPDATE_COMPLETE", + Timeout: 15 * time.Minute, + MinTimeout: 5 * time.Second, + Refresh: func() (interface{}, string, error) { + resp, err := conn.DescribeStacks(&cloudformation.DescribeStacksInput{ + StackName: aws.String(d.Get("name").(string)), + }) + stack := resp.Stacks[0] + status := *stack.StackStatus + log.Printf("[DEBUG] Current CloudFormation stack status: %q", status) + + if status == "UPDATE_ROLLBACK_COMPLETE" { + failures, err := getCloudFormationFailures(stack.StackName, *lastUpdatedTime, conn) + if err != nil { + return resp, "", fmt.Errorf( + "Failed getting details about rollback: %q", err.Error()) + } + + return resp, "", fmt.Errorf( + "UPDATE_ROLLBACK_COMPLETE:\n%q", failures) + } + + return resp, status, err + }, + } + + _, err = wait.WaitForState() + if err != nil { + return err + } + + log.Printf("[DEBUG] CloudFormation stack %q has been updated", *stack.StackId) + + return 
resourceAwsCloudFormationStackRead(d, meta) +} + +func resourceAwsCloudFormationStackDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cfconn + + input := &cloudformation.DeleteStackInput{ + StackName: aws.String(d.Get("name").(string)), + } + log.Printf("[DEBUG] Deleting CloudFormation stack %s", input) + _, err := conn.DeleteStack(input) + if err != nil { + awsErr, ok := err.(awserr.Error) + if !ok { + return err + } + + if awsErr.Code() == "ValidationError" { + // Ignore stack which has been already deleted + return nil + } + return err + } + + wait := resource.StateChangeConf{ + Pending: []string{"DELETE_IN_PROGRESS", "ROLLBACK_IN_PROGRESS"}, + Target: "DELETE_COMPLETE", + Timeout: 30 * time.Minute, + MinTimeout: 5 * time.Second, + Refresh: func() (interface{}, string, error) { + resp, err := conn.DescribeStacks(&cloudformation.DescribeStacksInput{ + StackName: aws.String(d.Get("name").(string)), + }) + + if err != nil { + awsErr, ok := err.(awserr.Error) + if !ok { + return resp, "DELETE_FAILED", err + } + + log.Printf("[DEBUG] Error when deleting CloudFormation stack: %s: %s", + awsErr.Code(), awsErr.Message()) + + if awsErr.Code() == "ValidationError" { + return resp, "DELETE_COMPLETE", nil + } + } + + if len(resp.Stacks) == 0 { + log.Printf("[DEBUG] CloudFormation stack %q is already gone", d.Get("name")) + return resp, "DELETE_COMPLETE", nil + } + + status := *resp.Stacks[0].StackStatus + log.Printf("[DEBUG] Current CloudFormation stack status: %q", status) + + return resp, status, err + }, + } + + _, err = wait.WaitForState() + if err != nil { + return err + } + + log.Printf("[DEBUG] CloudFormation stack %q has been deleted", d.Id()) + + d.SetId("") + + return nil +} + +// getLastCfEventTimestamp takes the first event in a list +// of events ordered from the newest to the oldest +// and extracts timestamp from it +// LastUpdatedTime only provides last >successful< updated time +func getLastCfEventTimestamp(stackName 
string, conn *cloudformation.CloudFormation) ( + *time.Time, error) { + output, err := conn.DescribeStackEvents(&cloudformation.DescribeStackEventsInput{ + StackName: aws.String(stackName), + }) + if err != nil { + return nil, err + } + + return output.StackEvents[0].Timestamp, nil +} + +// getCloudFormationFailures returns ResourceStatusReason(s) +// of events that should be failures based on regexp match of status +func getCloudFormationFailures(stackName *string, afterTime time.Time, + conn *cloudformation.CloudFormation) ([]string, error) { + var failures []string + // Only catching failures from last 100 events + // Some extra iteration logic via NextToken could be added + // but in reality it's nearly impossible to generate >100 + // events by a single stack update + events, err := conn.DescribeStackEvents(&cloudformation.DescribeStackEventsInput{ + StackName: stackName, + }) + + if err != nil { + return nil, err + } + + failRe := regexp.MustCompile("_FAILED$") + rollbackRe := regexp.MustCompile("^ROLLBACK_") + + for _, e := range events.StackEvents { + if (failRe.MatchString(*e.ResourceStatus) || rollbackRe.MatchString(*e.ResourceStatus)) && + e.Timestamp.After(afterTime) && e.ResourceStatusReason != nil { + failures = append(failures, *e.ResourceStatusReason) + } + } + + return failures, nil +} diff --git a/builtin/providers/aws/structure.go b/builtin/providers/aws/structure.go index 5976a8ff0..fd581c84a 100644 --- a/builtin/providers/aws/structure.go +++ b/builtin/providers/aws/structure.go @@ -9,6 +9,7 @@ import ( "strings" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cloudformation" "github.com/aws/aws-sdk-go/service/directoryservice" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ecs" @@ -601,3 +602,57 @@ func flattenDSVpcSettings( return []map[string]interface{}{settings} } + +func expandCloudFormationParameters(params map[string]interface{}) []*cloudformation.Parameter { + var cfParams 
[]*cloudformation.Parameter + for k, v := range params { + cfParams = append(cfParams, &cloudformation.Parameter{ + ParameterKey: aws.String(k), + ParameterValue: aws.String(v.(string)), + }) + } + + return cfParams +} + +// flattenCloudFormationParameters is flattening list of +// *cloudformation.Parameters and only returning existing +// parameters to avoid clash with default values +func flattenCloudFormationParameters(cfParams []*cloudformation.Parameter, + originalParams map[string]interface{}) map[string]interface{} { + params := make(map[string]interface{}, len(cfParams)) + for _, p := range cfParams { + _, isConfigured := originalParams[*p.ParameterKey] + if isConfigured { + params[*p.ParameterKey] = *p.ParameterValue + } + } + return params +} + +func expandCloudFormationTags(tags map[string]interface{}) []*cloudformation.Tag { + var cfTags []*cloudformation.Tag + for k, v := range tags { + cfTags = append(cfTags, &cloudformation.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + }) + } + return cfTags +} + +func flattenCloudFormationTags(cfTags []*cloudformation.Tag) map[string]string { + tags := make(map[string]string, len(cfTags)) + for _, t := range cfTags { + tags[*t.Key] = *t.Value + } + return tags +} + +func flattenCloudFormationOutputs(cfOutputs []*cloudformation.Output) map[string]string { + outputs := make(map[string]string, len(cfOutputs)) + for _, o := range cfOutputs { + outputs[*o.OutputKey] = *o.OutputValue + } + return outputs +} From 7088a0096e920501726c5769b941dfa85d4a38d6 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Mon, 13 Jul 2015 07:51:32 +0100 Subject: [PATCH 011/100] provider/aws: Add acceptance tests for aws_cloudformation_stack --- .../resource_aws_cloudformation_stack_test.go | 228 ++++++++++++++++++ 1 file changed, 228 insertions(+) create mode 100644 builtin/providers/aws/resource_aws_cloudformation_stack_test.go diff --git a/builtin/providers/aws/resource_aws_cloudformation_stack_test.go 
b/builtin/providers/aws/resource_aws_cloudformation_stack_test.go new file mode 100644 index 000000000..7ad24be34 --- /dev/null +++ b/builtin/providers/aws/resource_aws_cloudformation_stack_test.go @@ -0,0 +1,228 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cloudformation" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSCloudFormation_basic(t *testing.T) { + var stack cloudformation.Stack + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSCloudFormationDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSCloudFormationConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudFormationStackExists("aws_cloudformation_stack.network", &stack), + ), + }, + }, + }) +} + +func TestAccAWSCloudFormation_defaultParams(t *testing.T) { + var stack cloudformation.Stack + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSCloudFormationDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSCloudFormationConfig_defaultParams, + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudFormationStackExists("aws_cloudformation_stack.asg-demo", &stack), + ), + }, + }, + }) +} + +func TestAccAWSCloudFormation_allAttributes(t *testing.T) { + var stack cloudformation.Stack + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSCloudFormationDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSCloudFormationConfig_allAttributes, + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudFormationStackExists("aws_cloudformation_stack.full", &stack), + ), + }, + }, + }) +} + +func 
testAccCheckCloudFormationStackExists(n string, stack *cloudformation.Stack) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + rs = rs + return fmt.Errorf("Not found: %s", n) + } + + conn := testAccProvider.Meta().(*AWSClient).cfconn + params := &cloudformation.DescribeStacksInput{ + StackName: aws.String(rs.Primary.ID), + } + resp, err := conn.DescribeStacks(params) + if err != nil { + return err + } + if len(resp.Stacks) == 0 { + return fmt.Errorf("CloudFormation stack not found") + } + + return nil + } +} + +func testAccCheckAWSCloudFormationDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).cfconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_cloudformation_stack" { + continue + } + + params := cloudformation.DescribeStacksInput{ + StackName: aws.String(rs.Primary.ID), + } + + resp, err := conn.DescribeStacks(¶ms) + + if err == nil { + if len(resp.Stacks) != 0 && + *resp.Stacks[0].StackId == rs.Primary.ID { + return fmt.Errorf("CloudFormation stack still exists: %q", rs.Primary.ID) + } + } + } + + return nil +} + +var testAccAWSCloudFormationConfig = ` +resource "aws_cloudformation_stack" "network" { + name = "tf-networking-stack" + template_body = < Date: Tue, 22 Sep 2015 14:39:49 -0700 Subject: [PATCH 012/100] provider/aws: fix bug with reading GSIs from dynamodb --- .../providers/aws/resource_aws_dynamodb_table.go | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/builtin/providers/aws/resource_aws_dynamodb_table.go b/builtin/providers/aws/resource_aws_dynamodb_table.go index df043ffe0..4193ddcb0 100644 --- a/builtin/providers/aws/resource_aws_dynamodb_table.go +++ b/builtin/providers/aws/resource_aws_dynamodb_table.go @@ -571,14 +571,23 @@ func resourceAwsDynamoDbTableRead(d *schema.ResourceData, meta interface{}) erro } } - gsi["projection_type"] = *gsiObject.Projection.ProjectionType - 
gsi["non_key_attributes"] = gsiObject.Projection.NonKeyAttributes + gsi["projection_type"] = *(gsiObject.Projection.ProjectionType) + + nonKeyAttrs := make([]string, 0, len(gsiObject.Projection.NonKeyAttributes)) + for _, nonKeyAttr := range gsiObject.Projection.NonKeyAttributes { + nonKeyAttrs = append(nonKeyAttrs, *nonKeyAttr) + } + gsi["non_key_attributes"] = nonKeyAttrs gsiList = append(gsiList, gsi) log.Printf("[DEBUG] Added GSI: %s - Read: %d / Write: %d", gsi["name"], gsi["read_capacity"], gsi["write_capacity"]) } - d.Set("global_secondary_index", gsiList) + err = d.Set("global_secondary_index", gsiList) + if err != nil { + return err + } + d.Set("arn", table.TableArn) return nil From b1d731bd6f24a1aab2ede549a46bf2b9c93be68e Mon Sep 17 00:00:00 2001 From: Joel Moss Date: Wed, 14 Oct 2015 19:05:38 +0100 Subject: [PATCH 013/100] [chef provisioning] When use_policyfile is given, the run list is not used, so don't require it --- builtin/provisioners/chef/resource_provisioner.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builtin/provisioners/chef/resource_provisioner.go b/builtin/provisioners/chef/resource_provisioner.go index 7b94486d2..50b5666ee 100644 --- a/builtin/provisioners/chef/resource_provisioner.go +++ b/builtin/provisioners/chef/resource_provisioner.go @@ -180,7 +180,7 @@ func (r *ResourceProvisioner) Validate(c *terraform.ResourceConfig) (ws []string if p.NodeName == "" { es = append(es, fmt.Errorf("Key not found: node_name")) } - if p.RunList == nil { + if !p.UsePolicyfile && p.RunList == nil { es = append(es, fmt.Errorf("Key not found: run_list")) } if p.ServerURL == "" { From b2b41192acbf8db54b8aef86be9191d57374f717 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Wed, 14 Oct 2015 22:16:58 -0500 Subject: [PATCH 014/100] provider/google: container test needed bigger instance to pass --- builtin/providers/google/resource_container_cluster_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/builtin/providers/google/resource_container_cluster_test.go b/builtin/providers/google/resource_container_cluster_test.go index 72f398a07..ea4a5a597 100644 --- a/builtin/providers/google/resource_container_cluster_test.go +++ b/builtin/providers/google/resource_container_cluster_test.go @@ -113,7 +113,7 @@ resource "google_container_cluster" "with_node_config" { } node_config { - machine_type = "f1-micro" + machine_type = "g1-small" disk_size_gb = 15 oauth_scopes = [ "https://www.googleapis.com/auth/compute", From 0efffc67f0015e4e7c6a51135b496bc0b5063e2c Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Wed, 14 Oct 2015 22:17:34 -0500 Subject: [PATCH 015/100] provider/google: storage bucket tests shouldn't not check predefined_acl it was depreceted in https://github.com/hashicorp/terraform/pull/3272 --- builtin/providers/google/resource_storage_bucket_test.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/builtin/providers/google/resource_storage_bucket_test.go b/builtin/providers/google/resource_storage_bucket_test.go index a7b59c61a..3860fc9a6 100644 --- a/builtin/providers/google/resource_storage_bucket_test.go +++ b/builtin/providers/google/resource_storage_bucket_test.go @@ -52,8 +52,6 @@ func TestAccStorageCustomAttributes(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckCloudStorageBucketExists( "google_storage_bucket.bucket", &bucketName), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", "predefined_acl", "publicReadWrite"), resource.TestCheckResourceAttr( "google_storage_bucket.bucket", "location", "EU"), resource.TestCheckResourceAttr( @@ -77,8 +75,6 @@ func TestAccStorageBucketUpdate(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckCloudStorageBucketExists( "google_storage_bucket.bucket", &bucketName), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", "predefined_acl", "projectPrivate"), resource.TestCheckResourceAttr( "google_storage_bucket.bucket", "location", "US"), 
resource.TestCheckResourceAttr( From f6e525e5310db681078b096ee8d7aa74b66b4820 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Wed, 14 Oct 2015 22:36:01 -0500 Subject: [PATCH 016/100] provider/google: one more test that should skip predefined_acl it was depreceted in https://github.com/hashicorp/terraform/pull/3272 --- builtin/providers/google/resource_storage_bucket_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/builtin/providers/google/resource_storage_bucket_test.go b/builtin/providers/google/resource_storage_bucket_test.go index 3860fc9a6..8e8330050 100644 --- a/builtin/providers/google/resource_storage_bucket_test.go +++ b/builtin/providers/google/resource_storage_bucket_test.go @@ -27,8 +27,6 @@ func TestAccStorage_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckCloudStorageBucketExists( "google_storage_bucket.bucket", &bucketName), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", "predefined_acl", "projectPrivate"), resource.TestCheckResourceAttr( "google_storage_bucket.bucket", "location", "US"), resource.TestCheckResourceAttr( From 3fbeb326cd53f75e01350df06eae89e681b54314 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Wed, 14 Oct 2015 21:34:07 -0500 Subject: [PATCH 017/100] provider/azure: acc tests fixes * avoid name collisions * update image names --- builtin/providers/azure/provider_test.go | 6 + .../azure/resource_azure_data_disk_test.go | 138 +++++++++--------- .../azure/resource_azure_instance_test.go | 4 +- 3 files changed, 81 insertions(+), 67 deletions(-) diff --git a/builtin/providers/azure/provider_test.go b/builtin/providers/azure/provider_test.go index b3feb8392..ca4017aae 100644 --- a/builtin/providers/azure/provider_test.go +++ b/builtin/providers/azure/provider_test.go @@ -3,9 +3,11 @@ package azure import ( "io" "io/ioutil" + "math/rand" "os" "strings" "testing" + "time" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/helper/schema" @@ -193,6 +195,10 @@ func 
TestAzure_isFile(t *testing.T) { } } +func genRandInt() int { + return rand.New(rand.NewSource(time.Now().UnixNano())).Int() % 100000 +} + // testAzurePublishSettingsStr is a revoked publishsettings file const testAzurePublishSettingsStr = ` diff --git a/builtin/providers/azure/resource_azure_data_disk_test.go b/builtin/providers/azure/resource_azure_data_disk_test.go index dfad26b5e..2c6660f66 100644 --- a/builtin/providers/azure/resource_azure_data_disk_test.go +++ b/builtin/providers/azure/resource_azure_data_disk_test.go @@ -13,6 +13,7 @@ import ( func TestAccAzureDataDisk_basic(t *testing.T) { var disk virtualmachinedisk.DataDiskResponse + name := fmt.Sprintf("terraform-test%d", genRandInt()) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -20,13 +21,13 @@ func TestAccAzureDataDisk_basic(t *testing.T) { CheckDestroy: testAccCheckAzureDataDiskDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccAzureDataDisk_basic, + Config: testAccAzureDataDisk_basic(name), Check: resource.ComposeTestCheckFunc( testAccCheckAzureDataDiskExists( "azure_data_disk.foo", &disk), testAccCheckAzureDataDiskAttributes(&disk), resource.TestCheckResourceAttr( - "azure_data_disk.foo", "label", "terraform-test-0"), + "azure_data_disk.foo", "label", fmt.Sprintf("%s-0", name)), resource.TestCheckResourceAttr( "azure_data_disk.foo", "size", "10"), ), @@ -37,6 +38,7 @@ func TestAccAzureDataDisk_basic(t *testing.T) { func TestAccAzureDataDisk_update(t *testing.T) { var disk virtualmachinedisk.DataDiskResponse + name := fmt.Sprintf("terraform-test%d", genRandInt()) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -44,12 +46,12 @@ func TestAccAzureDataDisk_update(t *testing.T) { CheckDestroy: testAccCheckAzureDataDiskDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccAzureDataDisk_advanced, + Config: testAccAzureDataDisk_advanced(name), Check: resource.ComposeTestCheckFunc( 
testAccCheckAzureDataDiskExists( "azure_data_disk.foo", &disk), resource.TestCheckResourceAttr( - "azure_data_disk.foo", "label", "terraform-test1-1"), + "azure_data_disk.foo", "label", fmt.Sprintf("%s-1", name)), resource.TestCheckResourceAttr( "azure_data_disk.foo", "lun", "1"), resource.TestCheckResourceAttr( @@ -57,17 +59,17 @@ func TestAccAzureDataDisk_update(t *testing.T) { resource.TestCheckResourceAttr( "azure_data_disk.foo", "caching", "ReadOnly"), resource.TestCheckResourceAttr( - "azure_data_disk.foo", "virtual_machine", "terraform-test1"), + "azure_data_disk.foo", "virtual_machine", name), ), }, resource.TestStep{ - Config: testAccAzureDataDisk_update, + Config: testAccAzureDataDisk_update(name), Check: resource.ComposeTestCheckFunc( testAccCheckAzureDataDiskExists( "azure_data_disk.foo", &disk), resource.TestCheckResourceAttr( - "azure_data_disk.foo", "label", "terraform-test1-1"), + "azure_data_disk.foo", "label", fmt.Sprintf("%s-1", name)), resource.TestCheckResourceAttr( "azure_data_disk.foo", "lun", "2"), resource.TestCheckResourceAttr( @@ -168,68 +170,74 @@ func testAccCheckAzureDataDiskDestroy(s *terraform.State) error { return nil } -var testAccAzureDataDisk_basic = fmt.Sprintf(` -resource "azure_instance" "foo" { - name = "terraform-test" - image = "Ubuntu Server 14.04 LTS" - size = "Basic_A1" - storage_service_name = "%s" - location = "West US" - username = "terraform" - password = "Pass!admin123" +func testAccAzureDataDisk_basic(name string) string { + return fmt.Sprintf(` + resource "azure_instance" "foo" { + name = "%s" + image = "Ubuntu Server 14.04 LTS" + size = "Basic_A1" + storage_service_name = "%s" + location = "West US" + username = "terraform" + password = "Pass!admin123" + } + + resource "azure_data_disk" "foo" { + lun = 0 + size = 10 + storage_service_name = "${azure_instance.foo.storage_service_name}" + virtual_machine = "${azure_instance.foo.id}" + }`, name, testAccStorageServiceName) } -resource "azure_data_disk" "foo" { - lun 
= 0 - size = 10 - storage_service_name = "${azure_instance.foo.storage_service_name}" - virtual_machine = "${azure_instance.foo.id}" -}`, testAccStorageServiceName) +func testAccAzureDataDisk_advanced(name string) string { + return fmt.Sprintf(` + resource "azure_instance" "foo" { + name = "%s" + image = "Ubuntu Server 14.04 LTS" + size = "Basic_A1" + storage_service_name = "%s" + location = "West US" + username = "terraform" + password = "Pass!admin123" + } -var testAccAzureDataDisk_advanced = fmt.Sprintf(` -resource "azure_instance" "foo" { - name = "terraform-test1" - image = "Ubuntu Server 14.04 LTS" - size = "Basic_A1" - storage_service_name = "%s" - location = "West US" - username = "terraform" - password = "Pass!admin123" + resource "azure_data_disk" "foo" { + lun = 1 + size = 10 + caching = "ReadOnly" + storage_service_name = "${azure_instance.foo.storage_service_name}" + virtual_machine = "${azure_instance.foo.id}" + }`, name, testAccStorageServiceName) } -resource "azure_data_disk" "foo" { - lun = 1 - size = 10 - caching = "ReadOnly" - storage_service_name = "${azure_instance.foo.storage_service_name}" - virtual_machine = "${azure_instance.foo.id}" -}`, testAccStorageServiceName) +func testAccAzureDataDisk_update(name string) string { + return fmt.Sprintf(` + resource "azure_instance" "foo" { + name = "%s" + image = "Ubuntu Server 14.04 LTS" + size = "Basic_A1" + storage_service_name = "%s" + location = "West US" + username = "terraform" + password = "Pass!admin123" + } -var testAccAzureDataDisk_update = fmt.Sprintf(` -resource "azure_instance" "foo" { - name = "terraform-test1" - image = "Ubuntu Server 14.04 LTS" - size = "Basic_A1" - storage_service_name = "%s" - location = "West US" - username = "terraform" - password = "Pass!admin123" + resource "azure_instance" "bar" { + name = "terraform-test2" + image = "Ubuntu Server 14.04 LTS" + size = "Basic_A1" + storage_service_name = "${azure_instance.foo.storage_service_name}" + location = "West US" + 
username = "terraform" + password = "Pass!admin123" + } + + resource "azure_data_disk" "foo" { + lun = 2 + size = 20 + caching = "ReadWrite" + storage_service_name = "${azure_instance.bar.storage_service_name}" + virtual_machine = "${azure_instance.bar.id}" + }`, name, testAccStorageServiceName) } - -resource "azure_instance" "bar" { - name = "terraform-test2" - image = "Ubuntu Server 14.04 LTS" - size = "Basic_A1" - storage_service_name = "${azure_instance.foo.storage_service_name}" - location = "West US" - username = "terraform" - password = "Pass!admin123" -} - -resource "azure_data_disk" "foo" { - lun = 2 - size = 20 - caching = "ReadWrite" - storage_service_name = "${azure_instance.bar.storage_service_name}" - virtual_machine = "${azure_instance.bar.id}" -}`, testAccStorageServiceName) diff --git a/builtin/providers/azure/resource_azure_instance_test.go b/builtin/providers/azure/resource_azure_instance_test.go index 79e712154..7e63486c3 100644 --- a/builtin/providers/azure/resource_azure_instance_test.go +++ b/builtin/providers/azure/resource_azure_instance_test.go @@ -446,7 +446,7 @@ resource "azure_security_group_rule" "foo" { resource "azure_instance" "foo" { name = "terraform-test1" - image = "Windows Server 2012 R2 Datacenter, April 2015" + image = "Windows Server 2012 R2 Datacenter, September 2015" size = "Basic_A1" storage_service_name = "%s" location = "West US" @@ -520,7 +520,7 @@ resource "azure_security_group_rule" "bar" { resource "azure_instance" "foo" { name = "terraform-test1" - image = "Windows Server 2012 R2 Datacenter, April 2015" + image = "Windows Server 2012 R2 Datacenter, September 2015" size = "Basic_A2" storage_service_name = "%s" location = "West US" From 05d6c5b509e814a12903ca770d6ee24b790c812f Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Mon, 12 Oct 2015 11:42:27 -0500 Subject: [PATCH 018/100] vsphere docs; first draft I'm not familiar with vSphere so I had to skip over details in some places, but this at least gets the basic 
structure in for the docs. --- website/source/assets/stylesheets/_docs.scss | 1 + .../providers/vsphere/index.html.markdown | 56 +++++++++++++++ .../vsphere/r/virtual_machine.html.markdown | 69 +++++++++++++++++++ website/source/layouts/docs.erb | 4 ++ website/source/layouts/vsphere.erb | 26 +++++++ 5 files changed, 156 insertions(+) create mode 100644 website/source/docs/providers/vsphere/index.html.markdown create mode 100644 website/source/docs/providers/vsphere/r/virtual_machine.html.markdown create mode 100644 website/source/layouts/vsphere.erb diff --git a/website/source/assets/stylesheets/_docs.scss b/website/source/assets/stylesheets/_docs.scss index 6849f9106..0defd251a 100755 --- a/website/source/assets/stylesheets/_docs.scss +++ b/website/source/assets/stylesheets/_docs.scss @@ -23,6 +23,7 @@ body.layout-openstack, body.layout-packet, body.layout-rundeck, body.layout-template, +body.layout-vsphere, body.layout-docs, body.layout-downloads, body.layout-inner, diff --git a/website/source/docs/providers/vsphere/index.html.markdown b/website/source/docs/providers/vsphere/index.html.markdown new file mode 100644 index 000000000..3930519a1 --- /dev/null +++ b/website/source/docs/providers/vsphere/index.html.markdown @@ -0,0 +1,56 @@ +--- +layout: "vsphere" +page_title: "Provider: vSphere" +sidebar_current: "docs-vsphere-index" +description: |- + The vSphere provider is used to interact with the resources supported by + vSphere. The provider needs to be configured with the proper credentials before + it can be used. +--- + +# vSphere Provider + +The vSphere provider is used to interact with the resources supported by vSphere. +The provider needs to be configured with the proper credentials before it can be used. + +Use the navigation to the left to read about the available resources. 
+ +## Example Usage + +``` +# Configure the vSphere Provider +provider "vsphere" { + user = "${var.vsphere_user}" + password = "${var.vsphere_password}" + vcenter_server = "${var.vsphere_vcenter_server}" +} + +# Create a virtual machine +resource "vsphere_virtual_machine" "web" { + name = "terraform_web" + vcpu = 2 + memory = 4096 + + network_interface { + label = "VM Network" + } + + disk { + size = 1 + iops = 500 + } +} +``` + +## Argument Reference + +The following arguments are used to configure the vSphere Provider: + +* `user` - (Required) This is the username for vSphere API operations. Can also + be specified with the `VSPHERE_USER` environment variable. +* `password` - (Required) This is the password for vSphere API operations. Can + also be specified with the `VSPHERE_PASSWORD` environment variable. +* `vcenter_server` - (Required) This is the vCenter server name for vSphere API + operations. Can also be specified with the `VSPHERE_VCENTER` environment + variable. + diff --git a/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown b/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown new file mode 100644 index 000000000..6ce012d65 --- /dev/null +++ b/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown @@ -0,0 +1,69 @@ +--- +layout: "vsphere" +page_title: "vSphere: vsphere_virtual_machine" +sidebar_current: "docs-vsphere-resource-virtual-machine" +description: |- + Provides a vSphere virtual machine resource. This can be used to create, modify, and delete virtual machines. +--- + +# vsphere\_virtual\_machine + +Provides a vSphere virtual machine resource. This can be used to create, +modify, and delete virtual machines. 
+ +## Example Usage + +``` +resource "vsphere_virtual_machine" "web" { + name = "terraform_web" + vcpu = 2 + memory = 4096 + + network_interface { + label = "VM Network" + } + + disk { + size = 1 + iops = 500 + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The virtual machine name +* `vcpu` - (Required) The number of virtual CPUs to allocate to the virtual machine +* `memory` - (Required) The amount of RAM (in MB) to allocate to the virtual machine +* `datacenter` - (Optional) The name of a Datacenter in which to launch the virtual machine +* `cluster` - (Optional) Name of a Cluster in which to launch the virtual machine +* `resource_pool` (Optional) The name of a Resource Pool in which to launch the virtual machine +* `gateway` - (Optional) Gateway IP address to use for all network interfaces +* `domain` - (Optional) A FQDN for the virtual machine; defaults to "vsphere.local" +* `time_zone` - (Optional) The [time zone](https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/timezone.html) to set on the virtual machine. Defaults to "Etc/UTC" +* `dns_suffixes` - (Optional) List of name resolution suffixes for the virtual network adapter +* `dns_servers` - (Optional) List of DNS servers for the virtual network adapter; defaults to 8.8.8.8, 8.8.4.4 +* `network_interface` - (Required) Configures virtual network interfaces; see [Network Interfaces](#network-interfaces) below for details. +* `disk` - (Required) Configures virtual disks; see [Disks](#disks) below for details +* `boot_delay` - (Optional) Time in seconds to wait for machine network to be ready. + + +## Network Interfaces + +Network interfaces support the following attributes: + +* `label` - (Required) Label to assign to this network interface +* `ip_address` - (Optional) Static IP to assign to this network interface. Interface will use DHCP if this is left blank. 
+* `subnet_mask` - (Optional) Subnet mask to use when statically assigning an IP. + + +## Disks + +Disks support the following attributes: + +* `template` - (Required if size not provided) Template for this disk. +* `datastore` - (Optional) Datastore for this disk +* `size` - (Required if template not provided) Size of this disk (in GB). +* `iops` - (Optional) Number of virtual iops to allocate for this disk. diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index af96b52c1..937c120de 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -188,6 +188,10 @@ > Template + + > + vSphere + diff --git a/website/source/layouts/vsphere.erb b/website/source/layouts/vsphere.erb new file mode 100644 index 000000000..49e58c057 --- /dev/null +++ b/website/source/layouts/vsphere.erb @@ -0,0 +1,26 @@ +<% wrap_layout :inner do %> + <% content_for :sidebar do %> + + <% end %> + + <%= yield %> +<% end %> From 9d41e6f3d1a3c926850cc984236b3f53d2499127 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Thu, 15 Oct 2015 09:35:06 -0500 Subject: [PATCH 019/100] vsphere docs: add warning about possible changes Since we merged this so that the community could collaborate on improvements, I thought it would be prudent to inform potential users of the status of the provider so they know what to expect. --- website/source/docs/providers/vsphere/index.html.markdown | 3 +++ 1 file changed, 3 insertions(+) diff --git a/website/source/docs/providers/vsphere/index.html.markdown b/website/source/docs/providers/vsphere/index.html.markdown index 3930519a1..17448b024 100644 --- a/website/source/docs/providers/vsphere/index.html.markdown +++ b/website/source/docs/providers/vsphere/index.html.markdown @@ -15,6 +15,9 @@ The provider needs to be configured with the proper credentials before it can be Use the navigation to the left to read about the available resources. 
+~> **NOTE:** The vSphere Provider currently represents _initial support_ and +therefore may undergo significant changes as the community improves it. + ## Example Usage ``` From d918d775f392ecd6b4ae8fb8323cc650d78051f2 Mon Sep 17 00:00:00 2001 From: Clint Date: Thu, 15 Oct 2015 10:04:55 -0500 Subject: [PATCH 020/100] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9a7d91318..1e30d90e7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -78,6 +78,7 @@ BUG FIXES: * provider/aws: Migrate KeyPair to version 1, fixing issue with using `file()` [GH-3470] * provider/aws: Fix force_delete on autoscaling groups [GH-3485] * provider/aws: Fix crash with VPC Peering connections [GH-3490] + * provider/aws: fix bug with reading GSIs from dynamodb [GH-3300] * provider/docker: Fix issue preventing private images from being referenced [GH-2619] * provider/digitalocean: Fix issue causing unnecessary diffs based on droplet slugsize case [GH-3284] * provider/openstack: add state 'downloading' to list of expected states in From 562a793430d804f4b1691bd0e00484360cbc2994 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Thu, 15 Oct 2015 10:21:20 -0500 Subject: [PATCH 021/100] style: ran go fmt --- terraform/eval_ignore_changes.go | 5 +++-- terraform/transform_resource.go | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/terraform/eval_ignore_changes.go b/terraform/eval_ignore_changes.go index 1a44089a9..2eb2d9bb1 100644 --- a/terraform/eval_ignore_changes.go +++ b/terraform/eval_ignore_changes.go @@ -1,4 +1,5 @@ package terraform + import ( "github.com/hashicorp/terraform/config" "strings" @@ -9,7 +10,7 @@ import ( // IgnoreChanges lifecycle. 
type EvalIgnoreChanges struct { Resource *config.Resource - Diff **InstanceDiff + Diff **InstanceDiff } func (n *EvalIgnoreChanges) Eval(ctx EvalContext) (interface{}, error) { @@ -20,7 +21,7 @@ func (n *EvalIgnoreChanges) Eval(ctx EvalContext) (interface{}, error) { diff := *n.Diff ignoreChanges := n.Resource.Lifecycle.IgnoreChanges - for _, ignoredName := range ignoreChanges { + for _, ignoredName := range ignoreChanges { for name := range diff.Attributes { if strings.HasPrefix(name, ignoredName) { delete(diff.Attributes, name) diff --git a/terraform/transform_resource.go b/terraform/transform_resource.go index 81ff158d9..5091f29c9 100644 --- a/terraform/transform_resource.go +++ b/terraform/transform_resource.go @@ -320,7 +320,7 @@ func (n *graphNodeExpandedResource) EvalTree() EvalNode { }, &EvalIgnoreChanges{ Resource: n.Resource, - Diff: &diff, + Diff: &diff, }, &EvalWriteState{ Name: n.stateId(), From 49396ba3e03461d9b2bc486e52c45385adb0f094 Mon Sep 17 00:00:00 2001 From: clint shryock Date: Thu, 15 Oct 2015 15:51:15 +0000 Subject: [PATCH 022/100] v0.6.4 --- CHANGELOG.md | 2 +- deps/v0-6-4.json | 440 +++++++++++++++++++++++++++++++++++++++++++ terraform/version.go | 2 +- 3 files changed, 442 insertions(+), 2 deletions(-) create mode 100644 deps/v0-6-4.json diff --git a/CHANGELOG.md b/CHANGELOG.md index 1e30d90e7..0b2132d9c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.6.4 (unreleased) +## 0.6.4 (October 15, 2015) FEATURES: diff --git a/deps/v0-6-4.json b/deps/v0-6-4.json new file mode 100644 index 000000000..e0d17b58f --- /dev/null +++ b/deps/v0-6-4.json @@ -0,0 +1,440 @@ +{ + "ImportPath": "github.com/hashicorp/terraform", + "GoVersion": "go1.4.2", + "Packages": [ + "./..." 
+ ], + "Deps": [ + { + "ImportPath": "github.com/Azure/azure-sdk-for-go/core/http", + "Comment": "v1.2-261-g3dcabb6", + "Rev": "3dcabb61c225af4013db7af20d4fe430fd09e311" + }, + { + "ImportPath": "github.com/Azure/azure-sdk-for-go/core/tls", + "Comment": "v1.2-261-g3dcabb6", + "Rev": "3dcabb61c225af4013db7af20d4fe430fd09e311" + }, + { + "ImportPath": "github.com/Azure/azure-sdk-for-go/management", + "Comment": "v1.2-261-g3dcabb6", + "Rev": "3dcabb61c225af4013db7af20d4fe430fd09e311" + }, + { + "ImportPath": "github.com/Azure/azure-sdk-for-go/storage", + "Comment": "v1.2-261-g3dcabb6", + "Rev": "3dcabb61c225af4013db7af20d4fe430fd09e311" + }, + { + "ImportPath": "github.com/apparentlymart/go-rundeck-api/rundeck", + "Comment": "v0.0.1", + "Rev": "cddcfbabbe903e9c8df35ff9569dbb8d67789200" + }, + { + "ImportPath": "github.com/armon/circbuf", + "Rev": "bbbad097214e2918d8543d5201d12bfd7bca254d" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/internal/endpoints", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/ec2query", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/json/jsonutil", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/jsonrpc", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/query", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/rest", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": 
"308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/restjson", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/restxml", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/internal/signer/v4", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/autoscaling", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/cloudwatch", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/cloudwatchlogs", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/directoryservice", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/dynamodb", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/ec2", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/ecs", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/efs", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + 
"ImportPath": "github.com/aws/aws-sdk-go/service/elasticache", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/elasticsearchservice", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/elb", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/glacier", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/iam", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/kinesis", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/lambda", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/opsworks", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/rds", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/route53", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/s3", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/sns", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/sqs", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": 
"308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/awslabs/aws-sdk-go/aws", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/cyberdelia/heroku-go/v3", + "Rev": "8344c6a3e281a99a693f5b71186249a8620eeb6b" + }, + { + "ImportPath": "github.com/dylanmei/iso8601", + "Rev": "2075bf119b58e5576c6ed9f867b8f3d17f2e54d4" + }, + { + "ImportPath": "github.com/dylanmei/winrmtest", + "Rev": "3e9661c52c45dab9a8528966a23d421922fca9b9" + }, + { + "ImportPath": "github.com/fsouza/go-dockerclient", + "Rev": "09604abc82243886001c3f56fd709d4ba603cead" + }, + { + "ImportPath": "github.com/hashicorp/atlas-go/archive", + "Comment": "20141209094003-77-g85a782d", + "Rev": "85a782d724b87fcd19db1c4aef9d5337a9bb7a0f" + }, + { + "ImportPath": "github.com/hashicorp/atlas-go/v1", + "Comment": "20141209094003-77-g85a782d", + "Rev": "85a782d724b87fcd19db1c4aef9d5337a9bb7a0f" + }, + { + "ImportPath": "github.com/hashicorp/consul/api", + "Comment": "v0.5.2-325-g5d9530d", + "Rev": "5d9530d7def3be989ba141382f1b9d82583418f4" + }, + { + "ImportPath": "github.com/hashicorp/errwrap", + "Rev": "7554cd9344cec97297fa6649b055a8c98c2a1e55" + }, + { + "ImportPath": "github.com/hashicorp/go-checkpoint", + "Rev": "528ab62f37fa83d4360e8ab2b2c425d6692ef533" + }, + { + "ImportPath": "github.com/hashicorp/go-multierror", + "Rev": "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5" + }, + { + "ImportPath": "github.com/hashicorp/go-version", + "Rev": "2b9865f60ce11e527bd1255ba82036d465570aa3" + }, + { + "ImportPath": "github.com/hashicorp/hcl", + "Rev": "4de51957ef8d4aba6e285ddfc587633bbfc7c0e8" + }, + { + "ImportPath": "github.com/hashicorp/logutils", + "Rev": "0dc08b1671f34c4250ce212759ebd880f743d883" + }, + { + "ImportPath": "github.com/hashicorp/yamux", + "Rev": "ddcd0a6ec7c55e29f235e27935bf98d302281bd3" + }, + { + "ImportPath": "github.com/imdario/mergo", + "Comment": "0.2.0-5-g61a5285", + "Rev": 
"61a52852277811e93e06d28e0d0c396284a7730b" + }, + { + "ImportPath": "github.com/masterzen/simplexml/dom", + "Rev": "95ba30457eb1121fa27753627c774c7cd4e90083" + }, + { + "ImportPath": "github.com/masterzen/winrm/soap", + "Rev": "b280be362a0c6af26fbaaa055924fb9c4830b006" + }, + { + "ImportPath": "github.com/masterzen/winrm/winrm", + "Rev": "b280be362a0c6af26fbaaa055924fb9c4830b006" + }, + { + "ImportPath": "github.com/masterzen/xmlpath", + "Rev": "13f4951698adc0fa9c1dda3e275d489a24201161" + }, + { + "ImportPath": "github.com/mitchellh/cli", + "Rev": "8102d0ed5ea2709ade1243798785888175f6e415" + }, + { + "ImportPath": "github.com/mitchellh/colorstring", + "Rev": "8631ce90f28644f54aeedcb3e389a85174e067d1" + }, + { + "ImportPath": "github.com/mitchellh/copystructure", + "Rev": "6fc66267e9da7d155a9d3bd489e00dad02666dc6" + }, + { + "ImportPath": "github.com/mitchellh/go-homedir", + "Rev": "df55a15e5ce646808815381b3db47a8c66ea62f4" + }, + { + "ImportPath": "github.com/mitchellh/go-linereader", + "Rev": "07bab5fdd9580500aea6ada0e09df4aa28e68abd" + }, + { + "ImportPath": "github.com/mitchellh/mapstructure", + "Rev": "281073eb9eb092240d33ef253c404f1cca550309" + }, + { + "ImportPath": "github.com/mitchellh/osext", + "Rev": "0dd3f918b21bec95ace9dc86c7e70266cfc5c702" + }, + { + "ImportPath": "github.com/mitchellh/packer/common/uuid", + "Comment": "v0.8.6-76-g88386bc", + "Rev": "88386bc9db1c850306e5c3737f14bef3a2c4050d" + }, + { + "ImportPath": "github.com/mitchellh/panicwrap", + "Rev": "1655d88c8ff7495ae9d2c19fd8f445f4657e22b0" + }, + { + "ImportPath": "github.com/mitchellh/prefixedio", + "Rev": "89d9b535996bf0a185f85b59578f2e245f9e1724" + }, + { + "ImportPath": "github.com/mitchellh/reflectwalk", + "Rev": "eecf4c70c626c7cfbb95c90195bc34d386c74ac6" + }, + { + "ImportPath": "github.com/nu7hatch/gouuid", + "Rev": "179d4d0c4d8d407a32af483c2354df1d2c91e6c3" + }, + { + "ImportPath": "github.com/packer-community/winrmcp/winrmcp", + "Rev": "743b1afe5ee3f6d5ba71a0d50673fa0ba2123d6b" + }, 
+ { + "ImportPath": "github.com/packethost/packngo", + "Rev": "496f5c8895c06505fae527830a9e554dc65325f4" + }, + { + "ImportPath": "github.com/pborman/uuid", + "Rev": "cccd189d45f7ac3368a0d127efb7f4d08ae0b655" + }, + { + "ImportPath": "github.com/pearkes/cloudflare", + "Rev": "19e280b056f3742e535ea12ae92a37ea7767ea82" + }, + { + "ImportPath": "github.com/pearkes/digitalocean", + "Rev": "e966f00c2d9de5743e87697ab77c7278f5998914" + }, + { + "ImportPath": "github.com/pearkes/dnsimple", + "Rev": "2a807d118c9e52e94819f414a6ec0293b45cad01" + }, + { + "ImportPath": "github.com/pearkes/mailgun", + "Rev": "5b02e7e9ffee9869f81393e80db138f6ff726260" + }, + { + "ImportPath": "github.com/rackspace/gophercloud", + "Comment": "v1.0.0-681-g8d032cb", + "Rev": "8d032cb1e835a0018269de3d6b53bb24fc77a8c0" + }, + { + "ImportPath": "github.com/satori/go.uuid", + "Rev": "08f0718b61e95ddba0ade3346725fe0e4bf28ca6" + }, + { + "ImportPath": "github.com/soniah/dnsmadeeasy", + "Comment": "v1.1-2-g5578a8c", + "Rev": "5578a8c15e33958c61cf7db720b6181af65f4a9e" + }, + { + "ImportPath": "github.com/vaughan0/go-ini", + "Rev": "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1" + }, + { + "ImportPath": "github.com/vmware/govmomi", + "Comment": "v0.2.0-28-g6037863", + "Rev": "603786323c18c13dd8b3da3d4f86b1dce4adf126" + }, + { + "ImportPath": "github.com/xanzy/go-cloudstack/cloudstack", + "Comment": "v1.2.0-48-g0e6e56f", + "Rev": "0e6e56fc0db3f48f060273f2e2ffe5d8d41b0112" + }, + { + "ImportPath": "golang.org/x/crypto/curve25519", + "Rev": "c8b9e6388ef638d5a8a9d865c634befdc46a6784" + }, + { + "ImportPath": "golang.org/x/crypto/pkcs12", + "Rev": "c8b9e6388ef638d5a8a9d865c634befdc46a6784" + }, + { + "ImportPath": "golang.org/x/crypto/ssh", + "Rev": "c8b9e6388ef638d5a8a9d865c634befdc46a6784" + }, + { + "ImportPath": "golang.org/x/net/context", + "Rev": "21c3935a8fc0f954d03e6b8a560c9600ffee38d2" + }, + { + "ImportPath": "golang.org/x/oauth2", + "Rev": "ef4eca6b097fad7cec79afcc278d213a6de1c960" + }, + { + "ImportPath": 
"google.golang.org/api/compute/v1", + "Rev": "e2903ca9e33d6cbaedda541d96996219056e8214" + }, + { + "ImportPath": "google.golang.org/api/container/v1", + "Rev": "e2903ca9e33d6cbaedda541d96996219056e8214" + }, + { + "ImportPath": "google.golang.org/api/dns/v1", + "Rev": "e2903ca9e33d6cbaedda541d96996219056e8214" + }, + { + "ImportPath": "google.golang.org/api/googleapi", + "Rev": "e2903ca9e33d6cbaedda541d96996219056e8214" + }, + { + "ImportPath": "google.golang.org/api/internal", + "Rev": "e2903ca9e33d6cbaedda541d96996219056e8214" + }, + { + "ImportPath": "google.golang.org/api/storage/v1", + "Rev": "e2903ca9e33d6cbaedda541d96996219056e8214" + }, + { + "ImportPath": "google.golang.org/cloud/compute/metadata", + "Rev": "4bea1598a0936d6d116506b59a8e1aa962b585c3" + }, + { + "ImportPath": "google.golang.org/cloud/internal", + "Rev": "4bea1598a0936d6d116506b59a8e1aa962b585c3" + } + ] +} diff --git a/terraform/version.go b/terraform/version.go index 741766330..a07a344c1 100644 --- a/terraform/version.go +++ b/terraform/version.go @@ -6,4 +6,4 @@ const Version = "0.6.4" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. 
-const VersionPrerelease = "dev" +const VersionPrerelease = "" From 1bfd4b0f7175688d00e344f0ddb6ea6295d11468 Mon Sep 17 00:00:00 2001 From: clint shryock Date: Thu, 15 Oct 2015 17:50:20 +0000 Subject: [PATCH 023/100] Reset CHANGELOG/version for 0.6.5 release --- CHANGELOG.md | 2 ++ terraform/version.go | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0b2132d9c..81316bf54 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,5 @@ +## 0.6.5 (Unreleased) + ## 0.6.4 (October 15, 2015) FEATURES: diff --git a/terraform/version.go b/terraform/version.go index a07a344c1..badbcd92e 100644 --- a/terraform/version.go +++ b/terraform/version.go @@ -1,9 +1,9 @@ package terraform // The main version number that is being run at the moment. -const Version = "0.6.4" +const Version = "0.6.5" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. -const VersionPrerelease = "" +const VersionPrerelease = "dev" From 26bc27594b1136f45be0844ba9a8bd071f2d9c14 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Thu, 15 Oct 2015 20:57:42 +0200 Subject: [PATCH 024/100] docs: Fix EFS documentation --- .../docs/providers/aws/r/efs_mount_target.html.markdown | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/source/docs/providers/aws/r/efs_mount_target.html.markdown b/website/source/docs/providers/aws/r/efs_mount_target.html.markdown index 59bd3bee2..c29b1b742 100644 --- a/website/source/docs/providers/aws/r/efs_mount_target.html.markdown +++ b/website/source/docs/providers/aws/r/efs_mount_target.html.markdown @@ -6,10 +6,10 @@ description: |- Provides an EFS mount target. --- -# aws\_efs\_file\_system +# aws\_efs\_mount\_target -Provides an EFS file system. Per [documentation](http://docs.aws.amazon.com/efs/latest/ug/limits.html) -the limit is 1 mount target per AZ. 
+Provides an EFS mount target. Per [documentation](http://docs.aws.amazon.com/efs/latest/ug/limits.html) +the limit is 1 mount target per AZ for a single EFS file system. ## Example Usage From 4017a611c34edf49554a8622bf65ac5122a5b679 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Thu, 15 Oct 2015 20:59:58 +0200 Subject: [PATCH 025/100] docs: Glacier Vault - add title + make note more brief --- .../source/docs/providers/aws/r/glacier_vault.html.markdown | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/website/source/docs/providers/aws/r/glacier_vault.html.markdown b/website/source/docs/providers/aws/r/glacier_vault.html.markdown index 6805338c7..d783c0226 100644 --- a/website/source/docs/providers/aws/r/glacier_vault.html.markdown +++ b/website/source/docs/providers/aws/r/glacier_vault.html.markdown @@ -10,7 +10,7 @@ description: |- Provides a Glacier Vault Resource. You can refer to the [Glacier Developer Guide](http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-vaults.html) for a full explanation of the Glacier Vault functionality -~> **NOTE:** When trying to remove a Glacier Vault, the Vault must be empty. +~> **NOTE:** When removing a Glacier Vault, the Vault must be empty. ## Example Usage @@ -66,6 +66,8 @@ The following arguments are supported: * `events` - (Required) You can configure a vault to publish a notification for `ArchiveRetrievalCompleted` and `InventoryRetrievalCompleted` events. * `sns_topic` - (Required) The SNS Topic ARN. +## Attributes Reference + The following attributes are exported: * `location` - The URI of the vault that was created. 
From 06f4ac8166595b47638c2ae39e68a6b0e6f549bf Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 15 Oct 2015 13:36:58 -0700 Subject: [PATCH 026/100] config/module: use go-getter --- .gitignore | 1 + config/module/copy_dir.go | 76 --------- config/module/detect.go | 92 ----------- config/module/detect_bitbucket.go | 66 -------- config/module/detect_bitbucket_test.go | 67 -------- config/module/detect_file.go | 60 ------- config/module/detect_file_test.go | 88 ---------- config/module/detect_github.go | 73 --------- config/module/detect_github_test.go | 55 ------- config/module/detect_test.go | 51 ------ config/module/folder_storage.go | 65 -------- config/module/folder_storage_test.go | 48 ------ config/module/get.go | 217 +++---------------------- config/module/get_file.go | 46 ------ config/module/get_file_test.go | 104 ------------ config/module/get_git.go | 74 --------- config/module/get_git_test.go | 143 ---------------- config/module/get_hg.go | 89 ---------- config/module/get_hg_test.go | 81 --------- config/module/get_http.go | 173 -------------------- config/module/get_http_test.go | 155 ------------------ config/module/get_test.go | 128 --------------- config/module/module_test.go | 25 +-- config/module/storage.go | 25 --- config/module/tree.go | 28 +--- 25 files changed, 32 insertions(+), 1998 deletions(-) delete mode 100644 config/module/copy_dir.go delete mode 100644 config/module/detect.go delete mode 100644 config/module/detect_bitbucket.go delete mode 100644 config/module/detect_bitbucket_test.go delete mode 100644 config/module/detect_file.go delete mode 100644 config/module/detect_file_test.go delete mode 100644 config/module/detect_github.go delete mode 100644 config/module/detect_github_test.go delete mode 100644 config/module/detect_test.go delete mode 100644 config/module/folder_storage.go delete mode 100644 config/module/folder_storage_test.go delete mode 100644 config/module/get_file.go delete mode 100644 
config/module/get_file_test.go delete mode 100644 config/module/get_git.go delete mode 100644 config/module/get_git_test.go delete mode 100644 config/module/get_hg.go delete mode 100644 config/module/get_hg_test.go delete mode 100644 config/module/get_http.go delete mode 100644 config/module/get_http_test.go delete mode 100644 config/module/get_test.go delete mode 100644 config/module/storage.go diff --git a/.gitignore b/.gitignore index 66ea31701..5a230d5ca 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ *.dll *.exe +.DS_Store example.tf terraform.tfplan terraform.tfstate diff --git a/config/module/copy_dir.go b/config/module/copy_dir.go deleted file mode 100644 index f2ae63b77..000000000 --- a/config/module/copy_dir.go +++ /dev/null @@ -1,76 +0,0 @@ -package module - -import ( - "io" - "os" - "path/filepath" - "strings" -) - -// copyDir copies the src directory contents into dst. Both directories -// should already exist. -func copyDir(dst, src string) error { - src, err := filepath.EvalSymlinks(src) - if err != nil { - return err - } - - walkFn := func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if path == src { - return nil - } - - if strings.HasPrefix(filepath.Base(path), ".") { - // Skip any dot files - if info.IsDir() { - return filepath.SkipDir - } else { - return nil - } - } - - // The "path" has the src prefixed to it. We need to join our - // destination with the path without the src on it. - dstPath := filepath.Join(dst, path[len(src):]) - - // If we have a directory, make that subdirectory, then continue - // the walk. - if info.IsDir() { - if path == filepath.Join(src, dst) { - // dst is in src; don't walk it. - return nil - } - - if err := os.MkdirAll(dstPath, 0755); err != nil { - return err - } - - return nil - } - - // If we have a file, copy the contents. 
- srcF, err := os.Open(path) - if err != nil { - return err - } - defer srcF.Close() - - dstF, err := os.Create(dstPath) - if err != nil { - return err - } - defer dstF.Close() - - if _, err := io.Copy(dstF, srcF); err != nil { - return err - } - - // Chmod it - return os.Chmod(dstPath, info.Mode()) - } - - return filepath.Walk(src, walkFn) -} diff --git a/config/module/detect.go b/config/module/detect.go deleted file mode 100644 index 51e07f725..000000000 --- a/config/module/detect.go +++ /dev/null @@ -1,92 +0,0 @@ -package module - -import ( - "fmt" - "path/filepath" - - "github.com/hashicorp/terraform/helper/url" -) - -// Detector defines the interface that an invalid URL or a URL with a blank -// scheme is passed through in order to determine if its shorthand for -// something else well-known. -type Detector interface { - // Detect will detect whether the string matches a known pattern to - // turn it into a proper URL. - Detect(string, string) (string, bool, error) -} - -// Detectors is the list of detectors that are tried on an invalid URL. -// This is also the order they're tried (index 0 is first). -var Detectors []Detector - -func init() { - Detectors = []Detector{ - new(GitHubDetector), - new(BitBucketDetector), - new(FileDetector), - } -} - -// Detect turns a source string into another source string if it is -// detected to be of a known pattern. -// -// This is safe to be called with an already valid source string: Detect -// will just return it. 
-func Detect(src string, pwd string) (string, error) { - getForce, getSrc := getForcedGetter(src) - - // Separate out the subdir if there is one, we don't pass that to detect - getSrc, subDir := getDirSubdir(getSrc) - - u, err := url.Parse(getSrc) - if err == nil && u.Scheme != "" { - // Valid URL - return src, nil - } - - for _, d := range Detectors { - result, ok, err := d.Detect(getSrc, pwd) - if err != nil { - return "", err - } - if !ok { - continue - } - - var detectForce string - detectForce, result = getForcedGetter(result) - result, detectSubdir := getDirSubdir(result) - - // If we have a subdir from the detection, then prepend it to our - // requested subdir. - if detectSubdir != "" { - if subDir != "" { - subDir = filepath.Join(detectSubdir, subDir) - } else { - subDir = detectSubdir - } - } - if subDir != "" { - u, err := url.Parse(result) - if err != nil { - return "", fmt.Errorf("Error parsing URL: %s", err) - } - u.Path += "//" + subDir - result = u.String() - } - - // Preserve the forced getter if it exists. We try to use the - // original set force first, followed by any force set by the - // detector. - if getForce != "" { - result = fmt.Sprintf("%s::%s", getForce, result) - } else if detectForce != "" { - result = fmt.Sprintf("%s::%s", detectForce, result) - } - - return result, nil - } - - return "", fmt.Errorf("invalid source string: %s", src) -} diff --git a/config/module/detect_bitbucket.go b/config/module/detect_bitbucket.go deleted file mode 100644 index 657637c09..000000000 --- a/config/module/detect_bitbucket.go +++ /dev/null @@ -1,66 +0,0 @@ -package module - -import ( - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" -) - -// BitBucketDetector implements Detector to detect BitBucket URLs and turn -// them into URLs that the Git or Hg Getter can understand. 
-type BitBucketDetector struct{} - -func (d *BitBucketDetector) Detect(src, _ string) (string, bool, error) { - if len(src) == 0 { - return "", false, nil - } - - if strings.HasPrefix(src, "bitbucket.org/") { - return d.detectHTTP(src) - } - - return "", false, nil -} - -func (d *BitBucketDetector) detectHTTP(src string) (string, bool, error) { - u, err := url.Parse("https://" + src) - if err != nil { - return "", true, fmt.Errorf("error parsing BitBucket URL: %s", err) - } - - // We need to get info on this BitBucket repository to determine whether - // it is Git or Hg. - var info struct { - SCM string `json:"scm"` - } - infoUrl := "https://api.bitbucket.org/1.0/repositories" + u.Path - resp, err := http.Get(infoUrl) - if err != nil { - return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err) - } - if resp.StatusCode == 403 { - // A private repo - return "", true, fmt.Errorf( - "shorthand BitBucket URL can't be used for private repos, " + - "please use a full URL") - } - dec := json.NewDecoder(resp.Body) - if err := dec.Decode(&info); err != nil { - return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err) - } - - switch info.SCM { - case "git": - if !strings.HasSuffix(u.Path, ".git") { - u.Path += ".git" - } - - return "git::" + u.String(), true, nil - case "hg": - return "hg::" + u.String(), true, nil - default: - return "", true, fmt.Errorf("unknown BitBucket SCM type: %s", info.SCM) - } -} diff --git a/config/module/detect_bitbucket_test.go b/config/module/detect_bitbucket_test.go deleted file mode 100644 index b05fd5999..000000000 --- a/config/module/detect_bitbucket_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package module - -import ( - "net/http" - "strings" - "testing" -) - -const testBBUrl = "https://bitbucket.org/hashicorp/tf-test-git" - -func TestBitBucketDetector(t *testing.T) { - t.Parallel() - - if _, err := http.Get(testBBUrl); err != nil { - t.Log("internet may not be working, skipping BB tests") - t.Skip() - } - - cases := 
[]struct { - Input string - Output string - }{ - // HTTP - { - "bitbucket.org/hashicorp/tf-test-git", - "git::https://bitbucket.org/hashicorp/tf-test-git.git", - }, - { - "bitbucket.org/hashicorp/tf-test-git.git", - "git::https://bitbucket.org/hashicorp/tf-test-git.git", - }, - { - "bitbucket.org/hashicorp/tf-test-hg", - "hg::https://bitbucket.org/hashicorp/tf-test-hg", - }, - } - - pwd := "/pwd" - f := new(BitBucketDetector) - for i, tc := range cases { - var err error - for i := 0; i < 3; i++ { - var output string - var ok bool - output, ok, err = f.Detect(tc.Input, pwd) - if err != nil { - if strings.Contains(err.Error(), "invalid character") { - continue - } - - t.Fatalf("err: %s", err) - } - if !ok { - t.Fatal("not ok") - } - - if output != tc.Output { - t.Fatalf("%d: bad: %#v", i, output) - } - - break - } - if i >= 3 { - t.Fatalf("failure from bitbucket: %s", err) - } - } -} diff --git a/config/module/detect_file.go b/config/module/detect_file.go deleted file mode 100644 index 859739f95..000000000 --- a/config/module/detect_file.go +++ /dev/null @@ -1,60 +0,0 @@ -package module - -import ( - "fmt" - "os" - "path/filepath" - "runtime" -) - -// FileDetector implements Detector to detect file paths. -type FileDetector struct{} - -func (d *FileDetector) Detect(src, pwd string) (string, bool, error) { - if len(src) == 0 { - return "", false, nil - } - - if !filepath.IsAbs(src) { - if pwd == "" { - return "", true, fmt.Errorf( - "relative paths require a module with a pwd") - } - - // Stat the pwd to determine if its a symbolic link. If it is, - // then the pwd becomes the original directory. Otherwise, - // `filepath.Join` below does some weird stuff. - // - // We just ignore if the pwd doesn't exist. That error will be - // caught later when we try to use the URL. 
- if fi, err := os.Lstat(pwd); !os.IsNotExist(err) { - if err != nil { - return "", true, err - } - if fi.Mode()&os.ModeSymlink != 0 { - pwd, err = os.Readlink(pwd) - if err != nil { - return "", true, err - } - } - } - - src = filepath.Join(pwd, src) - } - - return fmtFileURL(src), true, nil -} - -func fmtFileURL(path string) string { - if runtime.GOOS == "windows" { - // Make sure we're using "/" on Windows. URLs are "/"-based. - path = filepath.ToSlash(path) - return fmt.Sprintf("file://%s", path) - } - - // Make sure that we don't start with "/" since we add that below. - if path[0] == '/' { - path = path[1:] - } - return fmt.Sprintf("file:///%s", path) -} diff --git a/config/module/detect_file_test.go b/config/module/detect_file_test.go deleted file mode 100644 index 3e9db8bba..000000000 --- a/config/module/detect_file_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package module - -import ( - "runtime" - "testing" -) - -type fileTest struct { - in, pwd, out string - err bool -} - -var fileTests = []fileTest{ - {"./foo", "/pwd", "file:///pwd/foo", false}, - {"./foo?foo=bar", "/pwd", "file:///pwd/foo?foo=bar", false}, - {"foo", "/pwd", "file:///pwd/foo", false}, -} - -var unixFileTests = []fileTest{ - {"/foo", "/pwd", "file:///foo", false}, - {"/foo?bar=baz", "/pwd", "file:///foo?bar=baz", false}, -} - -var winFileTests = []fileTest{ - {"/foo", "/pwd", "file:///pwd/foo", false}, - {`C:\`, `/pwd`, `file://C:/`, false}, - {`C:\?bar=baz`, `/pwd`, `file://C:/?bar=baz`, false}, -} - -func TestFileDetector(t *testing.T) { - if runtime.GOOS == "windows" { - fileTests = append(fileTests, winFileTests...) - } else { - fileTests = append(fileTests, unixFileTests...) 
- } - - f := new(FileDetector) - for i, tc := range fileTests { - out, ok, err := f.Detect(tc.in, tc.pwd) - if err != nil { - t.Fatalf("err: %s", err) - } - if !ok { - t.Fatal("not ok") - } - - if out != tc.out { - t.Fatalf("%d: bad: %#v", i, out) - } - } -} - -var noPwdFileTests = []fileTest{ - {in: "./foo", pwd: "", out: "", err: true}, - {in: "foo", pwd: "", out: "", err: true}, -} - -var noPwdUnixFileTests = []fileTest{ - {in: "/foo", pwd: "", out: "file:///foo", err: false}, -} - -var noPwdWinFileTests = []fileTest{ - {in: "/foo", pwd: "", out: "", err: true}, - {in: `C:\`, pwd: ``, out: `file://C:/`, err: false}, -} - -func TestFileDetector_noPwd(t *testing.T) { - if runtime.GOOS == "windows" { - noPwdFileTests = append(noPwdFileTests, noPwdWinFileTests...) - } else { - noPwdFileTests = append(noPwdFileTests, noPwdUnixFileTests...) - } - - f := new(FileDetector) - for i, tc := range noPwdFileTests { - out, ok, err := f.Detect(tc.in, tc.pwd) - if err != nil != tc.err { - t.Fatalf("%d: err: %s", i, err) - } - if !ok { - t.Fatal("not ok") - } - - if out != tc.out { - t.Fatalf("%d: bad: %#v", i, out) - } - } -} diff --git a/config/module/detect_github.go b/config/module/detect_github.go deleted file mode 100644 index c4a4e89f0..000000000 --- a/config/module/detect_github.go +++ /dev/null @@ -1,73 +0,0 @@ -package module - -import ( - "fmt" - "net/url" - "strings" -) - -// GitHubDetector implements Detector to detect GitHub URLs and turn -// them into URLs that the Git Getter can understand. 
-type GitHubDetector struct{} - -func (d *GitHubDetector) Detect(src, _ string) (string, bool, error) { - if len(src) == 0 { - return "", false, nil - } - - if strings.HasPrefix(src, "github.com/") { - return d.detectHTTP(src) - } else if strings.HasPrefix(src, "git@github.com:") { - return d.detectSSH(src) - } - - return "", false, nil -} - -func (d *GitHubDetector) detectHTTP(src string) (string, bool, error) { - parts := strings.Split(src, "/") - if len(parts) < 3 { - return "", false, fmt.Errorf( - "GitHub URLs should be github.com/username/repo") - } - - urlStr := fmt.Sprintf("https://%s", strings.Join(parts[:3], "/")) - url, err := url.Parse(urlStr) - if err != nil { - return "", true, fmt.Errorf("error parsing GitHub URL: %s", err) - } - - if !strings.HasSuffix(url.Path, ".git") { - url.Path += ".git" - } - - if len(parts) > 3 { - url.Path += "//" + strings.Join(parts[3:], "/") - } - - return "git::" + url.String(), true, nil -} - -func (d *GitHubDetector) detectSSH(src string) (string, bool, error) { - idx := strings.Index(src, ":") - qidx := strings.Index(src, "?") - if qidx == -1 { - qidx = len(src) - } - - var u url.URL - u.Scheme = "ssh" - u.User = url.User("git") - u.Host = "github.com" - u.Path = src[idx+1 : qidx] - if qidx < len(src) { - q, err := url.ParseQuery(src[qidx+1:]) - if err != nil { - return "", true, fmt.Errorf("error parsing GitHub SSH URL: %s", err) - } - - u.RawQuery = q.Encode() - } - - return "git::" + u.String(), true, nil -} diff --git a/config/module/detect_github_test.go b/config/module/detect_github_test.go deleted file mode 100644 index 822e1806d..000000000 --- a/config/module/detect_github_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package module - -import ( - "testing" -) - -func TestGitHubDetector(t *testing.T) { - cases := []struct { - Input string - Output string - }{ - // HTTP - {"github.com/hashicorp/foo", "git::https://github.com/hashicorp/foo.git"}, - {"github.com/hashicorp/foo.git", 
"git::https://github.com/hashicorp/foo.git"}, - { - "github.com/hashicorp/foo/bar", - "git::https://github.com/hashicorp/foo.git//bar", - }, - { - "github.com/hashicorp/foo?foo=bar", - "git::https://github.com/hashicorp/foo.git?foo=bar", - }, - { - "github.com/hashicorp/foo.git?foo=bar", - "git::https://github.com/hashicorp/foo.git?foo=bar", - }, - - // SSH - {"git@github.com:hashicorp/foo.git", "git::ssh://git@github.com/hashicorp/foo.git"}, - { - "git@github.com:hashicorp/foo.git//bar", - "git::ssh://git@github.com/hashicorp/foo.git//bar", - }, - { - "git@github.com:hashicorp/foo.git?foo=bar", - "git::ssh://git@github.com/hashicorp/foo.git?foo=bar", - }, - } - - pwd := "/pwd" - f := new(GitHubDetector) - for i, tc := range cases { - output, ok, err := f.Detect(tc.Input, pwd) - if err != nil { - t.Fatalf("err: %s", err) - } - if !ok { - t.Fatal("not ok") - } - - if output != tc.Output { - t.Fatalf("%d: bad: %#v", i, output) - } - } -} diff --git a/config/module/detect_test.go b/config/module/detect_test.go deleted file mode 100644 index d2ee8ea1a..000000000 --- a/config/module/detect_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package module - -import ( - "testing" -) - -func TestDetect(t *testing.T) { - cases := []struct { - Input string - Pwd string - Output string - Err bool - }{ - {"./foo", "/foo", "file:///foo/foo", false}, - {"git::./foo", "/foo", "git::file:///foo/foo", false}, - { - "git::github.com/hashicorp/foo", - "", - "git::https://github.com/hashicorp/foo.git", - false, - }, - { - "./foo//bar", - "/foo", - "file:///foo/foo//bar", - false, - }, - { - "git::github.com/hashicorp/foo//bar", - "", - "git::https://github.com/hashicorp/foo.git//bar", - false, - }, - { - "git::https://github.com/hashicorp/consul.git", - "", - "git::https://github.com/hashicorp/consul.git", - false, - }, - } - - for i, tc := range cases { - output, err := Detect(tc.Input, tc.Pwd) - if err != nil != tc.Err { - t.Fatalf("%d: bad err: %s", i, err) - } - if output != tc.Output { - 
t.Fatalf("%d: bad output: %s\nexpected: %s", i, output, tc.Output) - } - } -} diff --git a/config/module/folder_storage.go b/config/module/folder_storage.go deleted file mode 100644 index 81c9a2ac1..000000000 --- a/config/module/folder_storage.go +++ /dev/null @@ -1,65 +0,0 @@ -package module - -import ( - "crypto/md5" - "encoding/hex" - "fmt" - "os" - "path/filepath" -) - -// FolderStorage is an implementation of the Storage interface that manages -// modules on the disk. -type FolderStorage struct { - // StorageDir is the directory where the modules will be stored. - StorageDir string -} - -// Dir implements Storage.Dir -func (s *FolderStorage) Dir(key string) (d string, e bool, err error) { - d = s.dir(key) - _, err = os.Stat(d) - if err == nil { - // Directory exists - e = true - return - } - if os.IsNotExist(err) { - // Directory doesn't exist - d = "" - e = false - err = nil - return - } - - // An error - d = "" - e = false - return -} - -// Get implements Storage.Get -func (s *FolderStorage) Get(key string, source string, update bool) error { - dir := s.dir(key) - if !update { - if _, err := os.Stat(dir); err == nil { - // If the directory already exists, then we're done since - // we're not updating. - return nil - } else if !os.IsNotExist(err) { - // If the error we got wasn't a file-not-exist error, then - // something went wrong and we should report it. - return fmt.Errorf("Error reading module directory: %s", err) - } - } - - // Get the source. This always forces an update. - return Get(dir, source) -} - -// dir returns the directory name internally that we'll use to map to -// internally. 
-func (s *FolderStorage) dir(key string) string { - sum := md5.Sum([]byte(key)) - return filepath.Join(s.StorageDir, hex.EncodeToString(sum[:])) -} diff --git a/config/module/folder_storage_test.go b/config/module/folder_storage_test.go deleted file mode 100644 index 7fda6b21a..000000000 --- a/config/module/folder_storage_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package module - -import ( - "os" - "path/filepath" - "testing" -) - -func TestFolderStorage_impl(t *testing.T) { - var _ Storage = new(FolderStorage) -} - -func TestFolderStorage(t *testing.T) { - s := &FolderStorage{StorageDir: tempDir(t)} - - module := testModule("basic") - - // A module shouldn't exist at first... - _, ok, err := s.Dir(module) - if err != nil { - t.Fatalf("err: %s", err) - } - if ok { - t.Fatal("should not exist") - } - - key := "foo" - - // We can get it - err = s.Get(key, module, false) - if err != nil { - t.Fatalf("err: %s", err) - } - - // Now the module exists - dir, ok, err := s.Dir(key) - if err != nil { - t.Fatalf("err: %s", err) - } - if !ok { - t.Fatal("should exist") - } - - mainPath := filepath.Join(dir, "main.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } -} diff --git a/config/module/get.go b/config/module/get.go index 627d395a9..3820e65f2 100644 --- a/config/module/get.go +++ b/config/module/get.go @@ -1,207 +1,36 @@ package module import ( - "bytes" - "fmt" - "io/ioutil" - "net/url" - "os" - "os/exec" - "path/filepath" - "regexp" - "strings" - "syscall" - - urlhelper "github.com/hashicorp/terraform/helper/url" + "github.com/hashicorp/go-getter" ) -// Getter defines the interface that schemes must implement to download -// and update modules. -type Getter interface { - // Get downloads the given URL into the given directory. This always - // assumes that we're updating and gets the latest version that it can. - // - // The directory may already exist (if we're updating). 
If it is in a - // format that isn't understood, an error should be returned. Get shouldn't - // simply nuke the directory. - Get(string, *url.URL) error -} - -// Getters is the mapping of scheme to the Getter implementation that will -// be used to get a dependency. -var Getters map[string]Getter - -// forcedRegexp is the regular expression that finds forced getters. This -// syntax is schema::url, example: git::https://foo.com -var forcedRegexp = regexp.MustCompile(`^([A-Za-z]+)::(.+)$`) - -func init() { - httpGetter := new(HttpGetter) - - Getters = map[string]Getter{ - "file": new(FileGetter), - "git": new(GitGetter), - "hg": new(HgGetter), - "http": httpGetter, - "https": httpGetter, - } -} - -// Get downloads the module specified by src into the folder specified by -// dst. If dst already exists, Get will attempt to update it. +// GetMode is an enum that describes how modules are loaded. // -// src is a URL, whereas dst is always just a file path to a folder. This -// folder doesn't need to exist. It will be created if it doesn't exist. -func Get(dst, src string) error { - var force string - force, src = getForcedGetter(src) - - // If there is a subdir component, then we download the root separately - // and then copy over the proper subdir. 
- var realDst string - src, subDir := getDirSubdir(src) - if subDir != "" { - tmpDir, err := ioutil.TempDir("", "tf") - if err != nil { - return err - } - if err := os.RemoveAll(tmpDir); err != nil { - return err - } - defer os.RemoveAll(tmpDir) - - realDst = dst - dst = tmpDir - } - - u, err := urlhelper.Parse(src) - if err != nil { - return err - } - if force == "" { - force = u.Scheme - } - - g, ok := Getters[force] - if !ok { - return fmt.Errorf( - "module download not supported for scheme '%s'", force) - } - - err = g.Get(dst, u) - if err != nil { - err = fmt.Errorf("error downloading module '%s': %s", src, err) - return err - } - - // If we have a subdir, copy that over - if subDir != "" { - if err := os.RemoveAll(realDst); err != nil { - return err - } - if err := os.MkdirAll(realDst, 0755); err != nil { - return err - } - - return copyDir(realDst, filepath.Join(dst, subDir)) - } - - return nil -} - -// GetCopy is the same as Get except that it downloads a copy of the -// module represented by source. +// GetModeLoad says that modules will not be downloaded or updated, they will +// only be loaded from the storage. // -// This copy will omit and dot-prefixed files (such as .git/, .hg/) and -// can't be updated on its own. -func GetCopy(dst, src string) error { - // Create the temporary directory to do the real Get to - tmpDir, err := ioutil.TempDir("", "tf") - if err != nil { - return err - } - if err := os.RemoveAll(tmpDir); err != nil { - return err - } - defer os.RemoveAll(tmpDir) +// GetModeGet says that modules can be initially downloaded if they don't +// exist, but otherwise to just load from the current version in storage. +// +// GetModeUpdate says that modules should be checked for updates and +// downloaded prior to loading. If there are no updates, we load the version +// from disk, otherwise we download first and then load. 
+type GetMode byte - // Get to that temporary dir - if err := Get(tmpDir, src); err != nil { - return err - } +const ( + GetModeNone GetMode = iota + GetModeGet + GetModeUpdate +) - // Make sure the destination exists - if err := os.MkdirAll(dst, 0755); err != nil { - return err - } - - // Copy to the final location - return copyDir(dst, tmpDir) -} - -// getRunCommand is a helper that will run a command and capture the output -// in the case an error happens. -func getRunCommand(cmd *exec.Cmd) error { - var buf bytes.Buffer - cmd.Stdout = &buf - cmd.Stderr = &buf - err := cmd.Run() - if err == nil { - return nil - } - if exiterr, ok := err.(*exec.ExitError); ok { - // The program has exited with an exit code != 0 - if status, ok := exiterr.Sys().(syscall.WaitStatus); ok { - return fmt.Errorf( - "%s exited with %d: %s", - cmd.Path, - status.ExitStatus(), - buf.String()) +func getStorage(s getter.Storage, key string, src string, mode GetMode) (string, bool, error) { + // Get the module with the level specified if we were told to. + if mode > GetModeNone { + if err := s.Get(key, src, mode == GetModeUpdate); err != nil { + return "", false, err } } - return fmt.Errorf("error running %s: %s", cmd.Path, buf.String()) -} - -// getDirSubdir takes a source and returns a tuple of the URL without -// the subdir and the URL with the subdir. -func getDirSubdir(src string) (string, string) { - // Calcaulate an offset to avoid accidentally marking the scheme - // as the dir. - var offset int - if idx := strings.Index(src, "://"); idx > -1 { - offset = idx + 3 - } - - // First see if we even have an explicit subdir - idx := strings.Index(src[offset:], "//") - if idx == -1 { - return src, "" - } - - idx += offset - subdir := src[idx+2:] - src = src[:idx] - - // Next, check if we have query parameters and push them onto the - // URL. 
- if idx = strings.Index(subdir, "?"); idx > -1 { - query := subdir[idx:] - subdir = subdir[:idx] - src += query - } - - return src, subdir -} - -// getForcedGetter takes a source and returns the tuple of the forced -// getter and the raw URL (without the force syntax). -func getForcedGetter(src string) (string, string) { - var forced string - if ms := forcedRegexp.FindStringSubmatch(src); ms != nil { - forced = ms[1] - src = ms[2] - } - - return forced, src + // Get the directory where the module is. + return s.Dir(key) } diff --git a/config/module/get_file.go b/config/module/get_file.go deleted file mode 100644 index 73cb85834..000000000 --- a/config/module/get_file.go +++ /dev/null @@ -1,46 +0,0 @@ -package module - -import ( - "fmt" - "net/url" - "os" - "path/filepath" -) - -// FileGetter is a Getter implementation that will download a module from -// a file scheme. -type FileGetter struct{} - -func (g *FileGetter) Get(dst string, u *url.URL) error { - // The source path must exist and be a directory to be usable. 
- if fi, err := os.Stat(u.Path); err != nil { - return fmt.Errorf("source path error: %s", err) - } else if !fi.IsDir() { - return fmt.Errorf("source path must be a directory") - } - - fi, err := os.Lstat(dst) - if err != nil && !os.IsNotExist(err) { - return err - } - - // If the destination already exists, it must be a symlink - if err == nil { - mode := fi.Mode() - if mode&os.ModeSymlink == 0 { - return fmt.Errorf("destination exists and is not a symlink") - } - - // Remove the destination - if err := os.Remove(dst); err != nil { - return err - } - } - - // Create all the parent directories - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { - return err - } - - return os.Symlink(u.Path, dst) -} diff --git a/config/module/get_file_test.go b/config/module/get_file_test.go deleted file mode 100644 index 4c9f6126a..000000000 --- a/config/module/get_file_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package module - -import ( - "os" - "path/filepath" - "testing" -) - -func TestFileGetter_impl(t *testing.T) { - var _ Getter = new(FileGetter) -} - -func TestFileGetter(t *testing.T) { - g := new(FileGetter) - dst := tempDir(t) - - // With a dir that doesn't exist - if err := g.Get(dst, testModuleURL("basic")); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify the destination folder is a symlink - fi, err := os.Lstat(dst) - if err != nil { - t.Fatalf("err: %s", err) - } - if fi.Mode()&os.ModeSymlink == 0 { - t.Fatal("destination is not a symlink") - } - - // Verify the main file exists - mainPath := filepath.Join(dst, "main.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestFileGetter_sourceFile(t *testing.T) { - g := new(FileGetter) - dst := tempDir(t) - - // With a source URL that is a path to a file - u := testModuleURL("basic") - u.Path += "/main.tf" - if err := g.Get(dst, u); err == nil { - t.Fatal("should error") - } -} - -func TestFileGetter_sourceNoExist(t *testing.T) { - g := new(FileGetter) - dst := 
tempDir(t) - - // With a source URL that doesn't exist - u := testModuleURL("basic") - u.Path += "/main" - if err := g.Get(dst, u); err == nil { - t.Fatal("should error") - } -} - -func TestFileGetter_dir(t *testing.T) { - g := new(FileGetter) - dst := tempDir(t) - - if err := os.MkdirAll(dst, 0755); err != nil { - t.Fatalf("err: %s", err) - } - - // With a dir that exists that isn't a symlink - if err := g.Get(dst, testModuleURL("basic")); err == nil { - t.Fatal("should error") - } -} - -func TestFileGetter_dirSymlink(t *testing.T) { - g := new(FileGetter) - dst := tempDir(t) - dst2 := tempDir(t) - - // Make parents - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { - t.Fatalf("err: %s", err) - } - if err := os.MkdirAll(dst2, 0755); err != nil { - t.Fatalf("err: %s", err) - } - - // Make a symlink - if err := os.Symlink(dst2, dst); err != nil { - t.Fatalf("err: %s", err) - } - - // With a dir that exists that isn't a symlink - if err := g.Get(dst, testModuleURL("basic")); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify the main file exists - mainPath := filepath.Join(dst, "main.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } -} diff --git a/config/module/get_git.go b/config/module/get_git.go deleted file mode 100644 index 5ab27ba0b..000000000 --- a/config/module/get_git.go +++ /dev/null @@ -1,74 +0,0 @@ -package module - -import ( - "fmt" - "net/url" - "os" - "os/exec" -) - -// GitGetter is a Getter implementation that will download a module from -// a git repository. 
-type GitGetter struct{} - -func (g *GitGetter) Get(dst string, u *url.URL) error { - if _, err := exec.LookPath("git"); err != nil { - return fmt.Errorf("git must be available and on the PATH") - } - - // Extract some query parameters we use - var ref string - q := u.Query() - if len(q) > 0 { - ref = q.Get("ref") - q.Del("ref") - - // Copy the URL - var newU url.URL = *u - u = &newU - u.RawQuery = q.Encode() - } - - // First: clone or update the repository - _, err := os.Stat(dst) - if err != nil && !os.IsNotExist(err) { - return err - } - if err == nil { - err = g.update(dst, u) - } else { - err = g.clone(dst, u) - } - if err != nil { - return err - } - - // Next: check out the proper tag/branch if it is specified, and checkout - if ref == "" { - return nil - } - - return g.checkout(dst, ref) -} - -func (g *GitGetter) checkout(dst string, ref string) error { - cmd := exec.Command("git", "checkout", ref) - cmd.Dir = dst - return getRunCommand(cmd) -} - -func (g *GitGetter) clone(dst string, u *url.URL) error { - cmd := exec.Command("git", "clone", u.String(), dst) - return getRunCommand(cmd) -} - -func (g *GitGetter) update(dst string, u *url.URL) error { - // We have to be on a branch to pull - if err := g.checkout(dst, "master"); err != nil { - return err - } - - cmd := exec.Command("git", "pull", "--ff-only") - cmd.Dir = dst - return getRunCommand(cmd) -} diff --git a/config/module/get_git_test.go b/config/module/get_git_test.go deleted file mode 100644 index 3885ff8e7..000000000 --- a/config/module/get_git_test.go +++ /dev/null @@ -1,143 +0,0 @@ -package module - -import ( - "os" - "os/exec" - "path/filepath" - "testing" -) - -var testHasGit bool - -func init() { - if _, err := exec.LookPath("git"); err == nil { - testHasGit = true - } -} - -func TestGitGetter_impl(t *testing.T) { - var _ Getter = new(GitGetter) -} - -func TestGitGetter(t *testing.T) { - if !testHasGit { - t.Log("git not found, skipping") - t.Skip() - } - - g := new(GitGetter) - dst := 
tempDir(t) - - // Git doesn't allow nested ".git" directories so we do some hackiness - // here to get around that... - moduleDir := filepath.Join(fixtureDir, "basic-git") - oldName := filepath.Join(moduleDir, "DOTgit") - newName := filepath.Join(moduleDir, ".git") - if err := os.Rename(oldName, newName); err != nil { - t.Fatalf("err: %s", err) - } - defer os.Rename(newName, oldName) - - // With a dir that doesn't exist - if err := g.Get(dst, testModuleURL("basic-git")); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify the main file exists - mainPath := filepath.Join(dst, "main.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestGitGetter_branch(t *testing.T) { - if !testHasGit { - t.Log("git not found, skipping") - t.Skip() - } - - g := new(GitGetter) - dst := tempDir(t) - - // Git doesn't allow nested ".git" directories so we do some hackiness - // here to get around that... - moduleDir := filepath.Join(fixtureDir, "basic-git") - oldName := filepath.Join(moduleDir, "DOTgit") - newName := filepath.Join(moduleDir, ".git") - if err := os.Rename(oldName, newName); err != nil { - t.Fatalf("err: %s", err) - } - defer os.Rename(newName, oldName) - - url := testModuleURL("basic-git") - q := url.Query() - q.Add("ref", "test-branch") - url.RawQuery = q.Encode() - - if err := g.Get(dst, url); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify the main file exists - mainPath := filepath.Join(dst, "main_branch.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } - - // Get again should work - if err := g.Get(dst, url); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify the main file exists - mainPath = filepath.Join(dst, "main_branch.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestGitGetter_tag(t *testing.T) { - if !testHasGit { - t.Log("git not found, skipping") - t.Skip() - } - - g := new(GitGetter) - dst := tempDir(t) - - // 
Git doesn't allow nested ".git" directories so we do some hackiness - // here to get around that... - moduleDir := filepath.Join(fixtureDir, "basic-git") - oldName := filepath.Join(moduleDir, "DOTgit") - newName := filepath.Join(moduleDir, ".git") - if err := os.Rename(oldName, newName); err != nil { - t.Fatalf("err: %s", err) - } - defer os.Rename(newName, oldName) - - url := testModuleURL("basic-git") - q := url.Query() - q.Add("ref", "v1.0") - url.RawQuery = q.Encode() - - if err := g.Get(dst, url); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify the main file exists - mainPath := filepath.Join(dst, "main_tag1.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } - - // Get again should work - if err := g.Get(dst, url); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify the main file exists - mainPath = filepath.Join(dst, "main_tag1.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } -} diff --git a/config/module/get_hg.go b/config/module/get_hg.go deleted file mode 100644 index f74c14093..000000000 --- a/config/module/get_hg.go +++ /dev/null @@ -1,89 +0,0 @@ -package module - -import ( - "fmt" - "net/url" - "os" - "os/exec" - "runtime" - - urlhelper "github.com/hashicorp/terraform/helper/url" -) - -// HgGetter is a Getter implementation that will download a module from -// a Mercurial repository. 
-type HgGetter struct{} - -func (g *HgGetter) Get(dst string, u *url.URL) error { - if _, err := exec.LookPath("hg"); err != nil { - return fmt.Errorf("hg must be available and on the PATH") - } - - newURL, err := urlhelper.Parse(u.String()) - if err != nil { - return err - } - if fixWindowsDrivePath(newURL) { - // See valid file path form on http://www.selenic.com/hg/help/urls - newURL.Path = fmt.Sprintf("/%s", newURL.Path) - } - - // Extract some query parameters we use - var rev string - q := newURL.Query() - if len(q) > 0 { - rev = q.Get("rev") - q.Del("rev") - - newURL.RawQuery = q.Encode() - } - - _, err = os.Stat(dst) - if err != nil && !os.IsNotExist(err) { - return err - } - if err != nil { - if err := g.clone(dst, newURL); err != nil { - return err - } - } - - if err := g.pull(dst, newURL); err != nil { - return err - } - - return g.update(dst, newURL, rev) -} - -func (g *HgGetter) clone(dst string, u *url.URL) error { - cmd := exec.Command("hg", "clone", "-U", u.String(), dst) - return getRunCommand(cmd) -} - -func (g *HgGetter) pull(dst string, u *url.URL) error { - cmd := exec.Command("hg", "pull") - cmd.Dir = dst - return getRunCommand(cmd) -} - -func (g *HgGetter) update(dst string, u *url.URL, rev string) error { - args := []string{"update"} - if rev != "" { - args = append(args, rev) - } - - cmd := exec.Command("hg", args...) - cmd.Dir = dst - return getRunCommand(cmd) -} - -func fixWindowsDrivePath(u *url.URL) bool { - // hg assumes a file:/// prefix for Windows drive letter file paths. - // (e.g. file:///c:/foo/bar) - // If the URL Path does not begin with a '/' character, the resulting URL - // path will have a file:// prefix. (e.g. 
file://c:/foo/bar) - // See http://www.selenic.com/hg/help/urls and the examples listed in - // http://selenic.com/repo/hg-stable/file/1265a3a71d75/mercurial/util.py#l1936 - return runtime.GOOS == "windows" && u.Scheme == "file" && - len(u.Path) > 1 && u.Path[0] != '/' && u.Path[1] == ':' -} diff --git a/config/module/get_hg_test.go b/config/module/get_hg_test.go deleted file mode 100644 index d7125bde2..000000000 --- a/config/module/get_hg_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package module - -import ( - "os" - "os/exec" - "path/filepath" - "testing" -) - -var testHasHg bool - -func init() { - if _, err := exec.LookPath("hg"); err == nil { - testHasHg = true - } -} - -func TestHgGetter_impl(t *testing.T) { - var _ Getter = new(HgGetter) -} - -func TestHgGetter(t *testing.T) { - t.Parallel() - - if !testHasHg { - t.Log("hg not found, skipping") - t.Skip() - } - - g := new(HgGetter) - dst := tempDir(t) - - // With a dir that doesn't exist - if err := g.Get(dst, testModuleURL("basic-hg")); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify the main file exists - mainPath := filepath.Join(dst, "main.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestHgGetter_branch(t *testing.T) { - t.Parallel() - - if !testHasHg { - t.Log("hg not found, skipping") - t.Skip() - } - - g := new(HgGetter) - dst := tempDir(t) - - url := testModuleURL("basic-hg") - q := url.Query() - q.Add("rev", "test-branch") - url.RawQuery = q.Encode() - - if err := g.Get(dst, url); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify the main file exists - mainPath := filepath.Join(dst, "main_branch.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } - - // Get again should work - if err := g.Get(dst, url); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify the main file exists - mainPath = filepath.Join(dst, "main_branch.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } 
-} diff --git a/config/module/get_http.go b/config/module/get_http.go deleted file mode 100644 index be65d921a..000000000 --- a/config/module/get_http.go +++ /dev/null @@ -1,173 +0,0 @@ -package module - -import ( - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "path/filepath" - "strings" -) - -// HttpGetter is a Getter implementation that will download a module from -// an HTTP endpoint. The protocol for downloading a module from an HTTP -// endpoing is as follows: -// -// An HTTP GET request is made to the URL with the additional GET parameter -// "terraform-get=1". This lets you handle that scenario specially if you -// wish. The response must be a 2xx. -// -// First, a header is looked for "X-Terraform-Get" which should contain -// a source URL to download. -// -// If the header is not present, then a meta tag is searched for named -// "terraform-get" and the content should be a source URL. -// -// The source URL, whether from the header or meta tag, must be a fully -// formed URL. The shorthand syntax of "github.com/foo/bar" or relative -// paths are not allowed. -type HttpGetter struct{} - -func (g *HttpGetter) Get(dst string, u *url.URL) error { - // Copy the URL so we can modify it - var newU url.URL = *u - u = &newU - - // Add terraform-get to the parameter. 
- q := u.Query() - q.Add("terraform-get", "1") - u.RawQuery = q.Encode() - - // Get the URL - resp, err := http.Get(u.String()) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode < 200 || resp.StatusCode >= 300 { - return fmt.Errorf("bad response code: %d", resp.StatusCode) - } - - // Extract the source URL - var source string - if v := resp.Header.Get("X-Terraform-Get"); v != "" { - source = v - } else { - source, err = g.parseMeta(resp.Body) - if err != nil { - return err - } - } - if source == "" { - return fmt.Errorf("no source URL was returned") - } - - // If there is a subdir component, then we download the root separately - // into a temporary directory, then copy over the proper subdir. - source, subDir := getDirSubdir(source) - if subDir == "" { - return Get(dst, source) - } - - // We have a subdir, time to jump some hoops - return g.getSubdir(dst, source, subDir) -} - -// getSubdir downloads the source into the destination, but with -// the proper subdir. -func (g *HttpGetter) getSubdir(dst, source, subDir string) error { - // Create a temporary directory to store the full source - td, err := ioutil.TempDir("", "tf") - if err != nil { - return err - } - defer os.RemoveAll(td) - - // Download that into the given directory - if err := Get(td, source); err != nil { - return err - } - - // Make sure the subdir path actually exists - sourcePath := filepath.Join(td, subDir) - if _, err := os.Stat(sourcePath); err != nil { - return fmt.Errorf( - "Error downloading %s: %s", source, err) - } - - // Copy the subdirectory into our actual destination. - if err := os.RemoveAll(dst); err != nil { - return err - } - - // Make the final destination - if err := os.MkdirAll(dst, 0755); err != nil { - return err - } - - return copyDir(dst, sourcePath) -} - -// parseMeta looks for the first meta tag in the given reader that -// will give us the source URL. 
-func (g *HttpGetter) parseMeta(r io.Reader) (string, error) { - d := xml.NewDecoder(r) - d.CharsetReader = charsetReader - d.Strict = false - var err error - var t xml.Token - for { - t, err = d.Token() - if err != nil { - if err == io.EOF { - err = nil - } - return "", err - } - if e, ok := t.(xml.StartElement); ok && strings.EqualFold(e.Name.Local, "body") { - return "", nil - } - if e, ok := t.(xml.EndElement); ok && strings.EqualFold(e.Name.Local, "head") { - return "", nil - } - e, ok := t.(xml.StartElement) - if !ok || !strings.EqualFold(e.Name.Local, "meta") { - continue - } - if attrValue(e.Attr, "name") != "terraform-get" { - continue - } - if f := attrValue(e.Attr, "content"); f != "" { - return f, nil - } - } -} - -// attrValue returns the attribute value for the case-insensitive key -// `name', or the empty string if nothing is found. -func attrValue(attrs []xml.Attr, name string) string { - for _, a := range attrs { - if strings.EqualFold(a.Name.Local, name) { - return a.Value - } - } - return "" -} - -// charsetReader returns a reader for the given charset. Currently -// it only supports UTF-8 and ASCII. Otherwise, it returns a meaningful -// error which is printed by go get, so the user can find why the package -// wasn't downloaded if the encoding is not supported. Note that, in -// order to reduce potential errors, ASCII is treated as UTF-8 (i.e. characters -// greater than 0x7f are not rejected). 
-func charsetReader(charset string, input io.Reader) (io.Reader, error) { - switch strings.ToLower(charset) { - case "ascii": - return input, nil - default: - return nil, fmt.Errorf("can't decode XML document using charset %q", charset) - } -} diff --git a/config/module/get_http_test.go b/config/module/get_http_test.go deleted file mode 100644 index 5f2590f48..000000000 --- a/config/module/get_http_test.go +++ /dev/null @@ -1,155 +0,0 @@ -package module - -import ( - "fmt" - "net" - "net/http" - "net/url" - "os" - "path/filepath" - "testing" -) - -func TestHttpGetter_impl(t *testing.T) { - var _ Getter = new(HttpGetter) -} - -func TestHttpGetter_header(t *testing.T) { - ln := testHttpServer(t) - defer ln.Close() - - g := new(HttpGetter) - dst := tempDir(t) - - var u url.URL - u.Scheme = "http" - u.Host = ln.Addr().String() - u.Path = "/header" - - // Get it! - if err := g.Get(dst, &u); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify the main file exists - mainPath := filepath.Join(dst, "main.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestHttpGetter_meta(t *testing.T) { - ln := testHttpServer(t) - defer ln.Close() - - g := new(HttpGetter) - dst := tempDir(t) - - var u url.URL - u.Scheme = "http" - u.Host = ln.Addr().String() - u.Path = "/meta" - - // Get it! - if err := g.Get(dst, &u); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify the main file exists - mainPath := filepath.Join(dst, "main.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestHttpGetter_metaSubdir(t *testing.T) { - ln := testHttpServer(t) - defer ln.Close() - - g := new(HttpGetter) - dst := tempDir(t) - - var u url.URL - u.Scheme = "http" - u.Host = ln.Addr().String() - u.Path = "/meta-subdir" - - // Get it! 
- if err := g.Get(dst, &u); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify the main file exists - mainPath := filepath.Join(dst, "sub.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestHttpGetter_none(t *testing.T) { - ln := testHttpServer(t) - defer ln.Close() - - g := new(HttpGetter) - dst := tempDir(t) - - var u url.URL - u.Scheme = "http" - u.Host = ln.Addr().String() - u.Path = "/none" - - // Get it! - if err := g.Get(dst, &u); err == nil { - t.Fatal("should error") - } -} - -func testHttpServer(t *testing.T) net.Listener { - ln, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("err: %s", err) - } - - mux := http.NewServeMux() - mux.HandleFunc("/header", testHttpHandlerHeader) - mux.HandleFunc("/meta", testHttpHandlerMeta) - mux.HandleFunc("/meta-subdir", testHttpHandlerMetaSubdir) - - var server http.Server - server.Handler = mux - go server.Serve(ln) - - return ln -} - -func testHttpHandlerHeader(w http.ResponseWriter, r *http.Request) { - w.Header().Add("X-Terraform-Get", testModuleURL("basic").String()) - w.WriteHeader(200) -} - -func testHttpHandlerMeta(w http.ResponseWriter, r *http.Request) { - w.Write([]byte(fmt.Sprintf(testHttpMetaStr, testModuleURL("basic").String()))) -} - -func testHttpHandlerMetaSubdir(w http.ResponseWriter, r *http.Request) { - w.Write([]byte(fmt.Sprintf(testHttpMetaStr, testModuleURL("basic//subdir").String()))) -} - -func testHttpHandlerNone(w http.ResponseWriter, r *http.Request) { - w.Write([]byte(testHttpNoneStr)) -} - -const testHttpMetaStr = ` - - - - - -` - -const testHttpNoneStr = ` - - - - -` diff --git a/config/module/get_test.go b/config/module/get_test.go deleted file mode 100644 index b403c835c..000000000 --- a/config/module/get_test.go +++ /dev/null @@ -1,128 +0,0 @@ -package module - -import ( - "os" - "path/filepath" - "strings" - "testing" -) - -func TestGet_badSchema(t *testing.T) { - dst := tempDir(t) - u := testModule("basic") - u = 
strings.Replace(u, "file", "nope", -1) - - if err := Get(dst, u); err == nil { - t.Fatal("should error") - } -} - -func TestGet_file(t *testing.T) { - dst := tempDir(t) - u := testModule("basic") - - if err := Get(dst, u); err != nil { - t.Fatalf("err: %s", err) - } - - mainPath := filepath.Join(dst, "main.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestGet_fileForced(t *testing.T) { - dst := tempDir(t) - u := testModule("basic") - u = "file::" + u - - if err := Get(dst, u); err != nil { - t.Fatalf("err: %s", err) - } - - mainPath := filepath.Join(dst, "main.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestGet_fileSubdir(t *testing.T) { - dst := tempDir(t) - u := testModule("basic//subdir") - - if err := Get(dst, u); err != nil { - t.Fatalf("err: %s", err) - } - - mainPath := filepath.Join(dst, "sub.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestGetCopy_dot(t *testing.T) { - dst := tempDir(t) - u := testModule("basic-dot") - - if err := GetCopy(dst, u); err != nil { - t.Fatalf("err: %s", err) - } - - mainPath := filepath.Join(dst, "main.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } - - mainPath = filepath.Join(dst, "foo.tf") - if _, err := os.Stat(mainPath); err == nil { - t.Fatal("should not have foo.tf") - } -} - -func TestGetCopy_file(t *testing.T) { - dst := tempDir(t) - u := testModule("basic") - - if err := GetCopy(dst, u); err != nil { - t.Fatalf("err: %s", err) - } - - mainPath := filepath.Join(dst, "main.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestGetDirSubdir(t *testing.T) { - cases := []struct { - Input string - Dir, Sub string - }{ - { - "hashicorp.com", - "hashicorp.com", "", - }, - { - "hashicorp.com//foo", - "hashicorp.com", "foo", - }, - { - "hashicorp.com//foo?bar=baz", - "hashicorp.com?bar=baz", "foo", - }, - { 
- "file://foo//bar", - "file://foo", "bar", - }, - } - - for i, tc := range cases { - adir, asub := getDirSubdir(tc.Input) - if adir != tc.Dir { - t.Fatalf("%d: bad dir: %#v", i, adir) - } - if asub != tc.Sub { - t.Fatalf("%d: bad sub: %#v", i, asub) - } - } -} diff --git a/config/module/module_test.go b/config/module/module_test.go index f1517e480..89fee6ec5 100644 --- a/config/module/module_test.go +++ b/config/module/module_test.go @@ -2,13 +2,12 @@ package module import ( "io/ioutil" - "net/url" "os" "path/filepath" "testing" + "github.com/hashicorp/go-getter" "github.com/hashicorp/terraform/config" - urlhelper "github.com/hashicorp/terraform/helper/url" ) const fixtureDir = "./test-fixtures" @@ -34,24 +33,6 @@ func testConfig(t *testing.T, n string) *config.Config { return c } -func testModule(n string) string { - p := filepath.Join(fixtureDir, n) - p, err := filepath.Abs(p) - if err != nil { - panic(err) - } - return fmtFileURL(p) -} - -func testModuleURL(n string) *url.URL { - u, err := urlhelper.Parse(testModule(n)) - if err != nil { - panic(err) - } - - return u -} - -func testStorage(t *testing.T) Storage { - return &FolderStorage{StorageDir: tempDir(t)} +func testStorage(t *testing.T) getter.Storage { + return &getter.FolderStorage{StorageDir: tempDir(t)} } diff --git a/config/module/storage.go b/config/module/storage.go deleted file mode 100644 index 9c752f630..000000000 --- a/config/module/storage.go +++ /dev/null @@ -1,25 +0,0 @@ -package module - -// Storage is an interface that knows how to lookup downloaded modules -// as well as download and update modules from their sources into the -// proper location. -type Storage interface { - // Dir returns the directory on local disk where the modulue source - // can be loaded from. - Dir(string) (string, bool, error) - - // Get will download and optionally update the given module. 
- Get(string, string, bool) error -} - -func getStorage(s Storage, key string, src string, mode GetMode) (string, bool, error) { - // Get the module with the level specified if we were told to. - if mode > GetModeNone { - if err := s.Get(key, src, mode == GetModeUpdate); err != nil { - return "", false, err - } - } - - // Get the directory where the module is. - return s.Dir(key) -} diff --git a/config/module/tree.go b/config/module/tree.go index d7b3ac966..6a75c19c2 100644 --- a/config/module/tree.go +++ b/config/module/tree.go @@ -8,6 +8,7 @@ import ( "strings" "sync" + "github.com/hashicorp/go-getter" "github.com/hashicorp/terraform/config" ) @@ -27,25 +28,6 @@ type Tree struct { lock sync.RWMutex } -// GetMode is an enum that describes how modules are loaded. -// -// GetModeLoad says that modules will not be downloaded or updated, they will -// only be loaded from the storage. -// -// GetModeGet says that modules can be initially downloaded if they don't -// exist, but otherwise to just load from the current version in storage. -// -// GetModeUpdate says that modules should be checked for updates and -// downloaded prior to loading. If there are no updates, we load the version -// from disk, otherwise we download first and then load. -type GetMode byte - -const ( - GetModeNone GetMode = iota - GetModeGet - GetModeUpdate -) - // NewTree returns a new Tree for the given config structure. func NewTree(name string, c *config.Config) *Tree { return &Tree{config: c, name: name} @@ -136,7 +118,7 @@ func (t *Tree) Name() string { // module trees inherently require the configuration to be in a reasonably // sane state: no circular dependencies, proper module sources, etc. A full // suite of validations can be done by running Validate (after loading). 
-func (t *Tree) Load(s Storage, mode GetMode) error { +func (t *Tree) Load(s getter.Storage, mode GetMode) error { t.lock.Lock() defer t.lock.Unlock() @@ -159,15 +141,15 @@ func (t *Tree) Load(s Storage, mode GetMode) error { path = append(path, m.Name) // Split out the subdir if we have one - source, subDir := getDirSubdir(m.Source) + source, subDir := getter.SourceDirSubdir(m.Source) - source, err := Detect(source, t.config.Dir) + source, err := getter.Detect(source, t.config.Dir, getter.Detectors) if err != nil { return fmt.Errorf("module %s: %s", m.Name, err) } // Check if the detector introduced something new. - source, subDir2 := getDirSubdir(source) + source, subDir2 := getter.SourceDirSubdir(source) if subDir2 != "" { subDir = filepath.Join(subDir2, subDir) } From 344e7c26b5f116842932d0e6b6ad2f1a250526f4 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 15 Oct 2015 13:48:58 -0700 Subject: [PATCH 027/100] fix a bunch of tests from go-getter import --- command/apply.go | 4 +- command/command_test.go | 3 +- command/init.go | 3 +- command/meta.go | 5 +- command/module_storage.go | 4 +- command/module_storage_test.go | 4 +- config/lang/y.go | 272 ++++++++++++++++++++++++--------- config/module/copy_dir.go | 76 +++++++++ config/module/get.go | 33 ++++ helper/resource/testing.go | 3 +- 10 files changed, 328 insertions(+), 79 deletions(-) create mode 100644 config/module/copy_dir.go diff --git a/command/apply.go b/command/apply.go index 8001cfe07..0687116a8 100644 --- a/command/apply.go +++ b/command/apply.go @@ -7,8 +7,8 @@ import ( "sort" "strings" + "github.com/hashicorp/go-getter" "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/config/module" "github.com/hashicorp/terraform/terraform" ) @@ -76,7 +76,7 @@ func (c *ApplyCommand) Run(args []string) int { if !c.Destroy && maybeInit { // Do a detect to determine if we need to do an init + apply. 
- if detected, err := module.Detect(configPath, pwd); err != nil { + if detected, err := getter.Detect(configPath, pwd, getter.Detectors); err != nil { c.Ui.Error(fmt.Sprintf( "Invalid path: %s", err)) return 1 diff --git a/command/command_test.go b/command/command_test.go index 2b9f93dd1..954579c3d 100644 --- a/command/command_test.go +++ b/command/command_test.go @@ -7,6 +7,7 @@ import ( "strings" "testing" + "github.com/hashicorp/go-getter" "github.com/hashicorp/terraform/config/module" "github.com/hashicorp/terraform/terraform" ) @@ -73,7 +74,7 @@ func testModule(t *testing.T, name string) *module.Tree { t.Fatalf("err: %s", err) } - s := &module.FolderStorage{StorageDir: tempDir(t)} + s := &getter.FolderStorage{StorageDir: tempDir(t)} if err := mod.Load(s, module.GetModeGet); err != nil { t.Fatalf("err: %s", err) } diff --git a/command/init.go b/command/init.go index fb842d08d..1b92c0806 100644 --- a/command/init.go +++ b/command/init.go @@ -6,6 +6,7 @@ import ( "os" "strings" + "github.com/hashicorp/go-getter" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/config/module" "github.com/hashicorp/terraform/terraform" @@ -75,7 +76,7 @@ func (c *InitCommand) Run(args []string) int { } // Detect - source, err = module.Detect(source, pwd) + source, err = getter.Detect(source, pwd, getter.Detectors) if err != nil { c.Ui.Error(fmt.Sprintf( "Error with module source: %s", err)) diff --git a/command/meta.go b/command/meta.go index af4a52302..3a12de02f 100644 --- a/command/meta.go +++ b/command/meta.go @@ -9,6 +9,7 @@ import ( "path/filepath" "strconv" + "github.com/hashicorp/go-getter" "github.com/hashicorp/terraform/config/module" "github.com/hashicorp/terraform/state" "github.com/hashicorp/terraform/terraform" @@ -330,9 +331,9 @@ func (m *Meta) flagSet(n string) *flag.FlagSet { // moduleStorage returns the module.Storage implementation used to store // modules for commands. 
-func (m *Meta) moduleStorage(root string) module.Storage { +func (m *Meta) moduleStorage(root string) getter.Storage { return &uiModuleStorage{ - Storage: &module.FolderStorage{ + Storage: &getter.FolderStorage{ StorageDir: filepath.Join(root, "modules"), }, Ui: m.Ui, diff --git a/command/module_storage.go b/command/module_storage.go index e17786a80..5bb832897 100644 --- a/command/module_storage.go +++ b/command/module_storage.go @@ -3,14 +3,14 @@ package command import ( "fmt" - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/go-getter" "github.com/mitchellh/cli" ) // uiModuleStorage implements module.Storage and is just a proxy to output // to the UI any Get operations. type uiModuleStorage struct { - Storage module.Storage + Storage getter.Storage Ui cli.Ui } diff --git a/command/module_storage_test.go b/command/module_storage_test.go index b77c2b5f7..97a5ed7ae 100644 --- a/command/module_storage_test.go +++ b/command/module_storage_test.go @@ -3,9 +3,9 @@ package command import ( "testing" - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/go-getter" ) func TestUiModuleStorage_impl(t *testing.T) { - var _ module.Storage = new(uiModuleStorage) + var _ getter.Storage = new(uiModuleStorage) } diff --git a/config/lang/y.go b/config/lang/y.go index e7dd185ae..fd0693f15 100644 --- a/config/lang/y.go +++ b/config/lang/y.go @@ -30,7 +30,10 @@ const INTEGER = 57355 const FLOAT = 57356 const STRING = 57357 -var parserToknames = []string{ +var parserToknames = [...]string{ + "$end", + "error", + "$unk", "PROGRAM_BRACKET_LEFT", "PROGRAM_BRACKET_RIGHT", "PROGRAM_STRING_START", @@ -44,7 +47,7 @@ var parserToknames = []string{ "FLOAT", "STRING", } -var parserStatenames = []string{} +var parserStatenames = [...]string{} const parserEofCode = 1 const parserErrCode = 2 @@ -53,7 +56,7 @@ const parserMaxDepth = 200 //line lang.y:165 //line yacctab:1 -var parserExca = []int{ +var parserExca = [...]int{ -1, 1, 1, -1, -2, 0, @@ -67,75 
+70,103 @@ var parserStates []string const parserLast = 30 -var parserAct = []int{ +var parserAct = [...]int{ 9, 20, 16, 16, 7, 7, 3, 18, 10, 8, 1, 17, 14, 12, 13, 6, 6, 19, 8, 22, 15, 23, 24, 11, 2, 25, 16, 21, 4, 5, } -var parserPact = []int{ +var parserPact = [...]int{ 1, -1000, 1, -1000, -1000, -1000, -1000, 0, -1000, 15, 0, 1, -1000, -1000, -1, -1000, 0, -8, 0, -1000, -1000, 12, -9, -1000, 0, -9, } -var parserPgo = []int{ +var parserPgo = [...]int{ 0, 0, 29, 28, 23, 6, 27, 10, } -var parserR1 = []int{ +var parserR1 = [...]int{ 0, 7, 7, 4, 4, 5, 5, 2, 1, 1, 1, 1, 1, 1, 1, 6, 6, 6, 3, } -var parserR2 = []int{ +var parserR2 = [...]int{ 0, 0, 1, 1, 2, 1, 1, 3, 3, 1, 1, 1, 3, 1, 4, 0, 3, 1, 1, } -var parserChk = []int{ +var parserChk = [...]int{ -1000, -7, -4, -5, -3, -2, 15, 4, -5, -1, 8, -4, 13, 14, 12, 5, 11, -1, 8, -1, 9, -6, -1, 9, 10, -1, } -var parserDef = []int{ +var parserDef = [...]int{ 1, -2, 2, 3, 5, 6, 18, 0, 4, 0, 0, 9, 10, 11, 13, 7, 0, 0, 15, 12, 8, 0, 17, 14, 0, 16, } -var parserTok1 = []int{ +var parserTok1 = [...]int{ 1, } -var parserTok2 = []int{ +var parserTok2 = [...]int{ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, } -var parserTok3 = []int{ +var parserTok3 = [...]int{ 0, } +var parserErrorMessages = [...]struct { + state int + token int + msg string +}{} + //line yaccpar:1 /* parser for yacc output */ -var parserDebug = 0 +var ( + parserDebug = 0 + parserErrorVerbose = false +) type parserLexer interface { Lex(lval *parserSymType) int Error(s string) } +type parserParser interface { + Parse(parserLexer) int + Lookahead() int +} + +type parserParserImpl struct { + lookahead func() int +} + +func (p *parserParserImpl) Lookahead() int { + return p.lookahead() +} + +func parserNewParser() parserParser { + p := &parserParserImpl{ + lookahead: func() int { return -1 }, + } + return p +} + const parserFlag = -1000 func parserTokname(c int) string { - // 4 is TOKSTART above - if c >= 4 && c-4 < len(parserToknames) { - if parserToknames[c-4] != "" 
{ - return parserToknames[c-4] + if c >= 1 && c-1 < len(parserToknames) { + if parserToknames[c-1] != "" { + return parserToknames[c-1] } } return __yyfmt__.Sprintf("tok-%v", c) @@ -150,51 +181,129 @@ func parserStatname(s int) string { return __yyfmt__.Sprintf("state-%v", s) } -func parserlex1(lex parserLexer, lval *parserSymType) int { - c := 0 - char := lex.Lex(lval) +func parserErrorMessage(state, lookAhead int) string { + const TOKSTART = 4 + + if !parserErrorVerbose { + return "syntax error" + } + + for _, e := range parserErrorMessages { + if e.state == state && e.token == lookAhead { + return "syntax error: " + e.msg + } + } + + res := "syntax error: unexpected " + parserTokname(lookAhead) + + // To match Bison, suggest at most four expected tokens. + expected := make([]int, 0, 4) + + // Look for shiftable tokens. + base := parserPact[state] + for tok := TOKSTART; tok-1 < len(parserToknames); tok++ { + if n := base + tok; n >= 0 && n < parserLast && parserChk[parserAct[n]] == tok { + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + } + + if parserDef[state] == -2 { + i := 0 + for parserExca[i] != -1 || parserExca[i+1] != state { + i += 2 + } + + // Look for tokens that we accept or reduce. + for i += 2; parserExca[i] >= 0; i += 2 { + tok := parserExca[i] + if tok < TOKSTART || parserExca[i+1] == 0 { + continue + } + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + + // If the default action is to accept or reduce, give up. 
+ if parserExca[i+1] != 0 { + return res + } + } + + for i, tok := range expected { + if i == 0 { + res += ", expecting " + } else { + res += " or " + } + res += parserTokname(tok) + } + return res +} + +func parserlex1(lex parserLexer, lval *parserSymType) (char, token int) { + token = 0 + char = lex.Lex(lval) if char <= 0 { - c = parserTok1[0] + token = parserTok1[0] goto out } if char < len(parserTok1) { - c = parserTok1[char] + token = parserTok1[char] goto out } if char >= parserPrivate { if char < parserPrivate+len(parserTok2) { - c = parserTok2[char-parserPrivate] + token = parserTok2[char-parserPrivate] goto out } } for i := 0; i < len(parserTok3); i += 2 { - c = parserTok3[i+0] - if c == char { - c = parserTok3[i+1] + token = parserTok3[i+0] + if token == char { + token = parserTok3[i+1] goto out } } out: - if c == 0 { - c = parserTok2[1] /* unknown char */ + if token == 0 { + token = parserTok2[1] /* unknown char */ } if parserDebug >= 3 { - __yyfmt__.Printf("lex %s(%d)\n", parserTokname(c), uint(char)) + __yyfmt__.Printf("lex %s(%d)\n", parserTokname(token), uint(char)) } - return c + return char, token } func parserParse(parserlex parserLexer) int { + return parserNewParser().Parse(parserlex) +} + +func (parserrcvr *parserParserImpl) Parse(parserlex parserLexer) int { var parsern int var parserlval parserSymType var parserVAL parserSymType + var parserDollar []parserSymType + _ = parserDollar // silence set and not used parserS := make([]parserSymType, parserMaxDepth) Nerrs := 0 /* number of errors */ Errflag := 0 /* error recovery flag */ parserstate := 0 parserchar := -1 + parsertoken := -1 // parserchar translated into internal numbering + parserrcvr.lookahead = func() int { return parserchar } + defer func() { + // Make sure we report no lookahead when not parsing. 
+ parserstate = -1 + parserchar = -1 + parsertoken = -1 + }() parserp := -1 goto parserstack @@ -207,7 +316,7 @@ ret1: parserstack: /* put a state and value onto the stack */ if parserDebug >= 4 { - __yyfmt__.Printf("char %v in %v\n", parserTokname(parserchar), parserStatname(parserstate)) + __yyfmt__.Printf("char %v in %v\n", parserTokname(parsertoken), parserStatname(parserstate)) } parserp++ @@ -225,15 +334,16 @@ parsernewstate: goto parserdefault /* simple state */ } if parserchar < 0 { - parserchar = parserlex1(parserlex, &parserlval) + parserchar, parsertoken = parserlex1(parserlex, &parserlval) } - parsern += parserchar + parsern += parsertoken if parsern < 0 || parsern >= parserLast { goto parserdefault } parsern = parserAct[parsern] - if parserChk[parsern] == parserchar { /* valid shift */ + if parserChk[parsern] == parsertoken { /* valid shift */ parserchar = -1 + parsertoken = -1 parserVAL = parserlval parserstate = parsern if Errflag > 0 { @@ -247,7 +357,7 @@ parserdefault: parsern = parserDef[parserstate] if parsern == -2 { if parserchar < 0 { - parserchar = parserlex1(parserlex, &parserlval) + parserchar, parsertoken = parserlex1(parserlex, &parserlval) } /* look through exception table */ @@ -260,7 +370,7 @@ parserdefault: } for xi += 2; ; xi += 2 { parsern = parserExca[xi+0] - if parsern < 0 || parsern == parserchar { + if parsern < 0 || parsern == parsertoken { break } } @@ -273,11 +383,11 @@ parserdefault: /* error ... 
attempt to resume parsing */ switch Errflag { case 0: /* brand new error */ - parserlex.Error("syntax error") + parserlex.Error(parserErrorMessage(parserstate, parsertoken)) Nerrs++ if parserDebug >= 1 { __yyfmt__.Printf("%s", parserStatname(parserstate)) - __yyfmt__.Printf(" saw %s\n", parserTokname(parserchar)) + __yyfmt__.Printf(" saw %s\n", parserTokname(parsertoken)) } fallthrough @@ -305,12 +415,13 @@ parserdefault: case 3: /* no shift yet; clobber input char */ if parserDebug >= 2 { - __yyfmt__.Printf("error recovery discards %s\n", parserTokname(parserchar)) + __yyfmt__.Printf("error recovery discards %s\n", parserTokname(parsertoken)) } - if parserchar == parserEofCode { + if parsertoken == parserEofCode { goto ret1 } parserchar = -1 + parsertoken = -1 goto parsernewstate /* try again in the same state */ } } @@ -325,6 +436,13 @@ parserdefault: _ = parserpt // guard against "declared and not used" parserp -= parserR2[parsern] + // parserp is now the index of $0. Perform the default action. Iff the + // reduced production is ε, $1 is possibly out of range. + if parserp+1 >= len(parserS) { + nyys := make([]parserSymType, len(parserS)*2) + copy(nyys, parserS) + parserS = nyys + } parserVAL = parserS[parserp+1] /* consult goto table to find next state */ @@ -344,6 +462,7 @@ parserdefault: switch parsernt { case 1: + parserDollar = parserS[parserpt-0 : parserpt+1] //line lang.y:35 { parserResult = &ast.LiteralNode{ @@ -353,9 +472,10 @@ parserdefault: } } case 2: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:43 { - parserResult = parserS[parserpt-0].node + parserResult = parserDollar[1].node // We want to make sure that the top value is always a Concat // so that the return value is always a string type from an @@ -365,28 +485,30 @@ parserdefault: // because functionally the AST is the same, but we do that because // it makes for an easy literal check later (to check if a string // has any interpolations). 
- if _, ok := parserS[parserpt-0].node.(*ast.Concat); !ok { - if n, ok := parserS[parserpt-0].node.(*ast.LiteralNode); !ok || n.Typex != ast.TypeString { + if _, ok := parserDollar[1].node.(*ast.Concat); !ok { + if n, ok := parserDollar[1].node.(*ast.LiteralNode); !ok || n.Typex != ast.TypeString { parserResult = &ast.Concat{ - Exprs: []ast.Node{parserS[parserpt-0].node}, - Posx: parserS[parserpt-0].node.Pos(), + Exprs: []ast.Node{parserDollar[1].node}, + Posx: parserDollar[1].node.Pos(), } } } } case 3: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:66 { - parserVAL.node = parserS[parserpt-0].node + parserVAL.node = parserDollar[1].node } case 4: + parserDollar = parserS[parserpt-2 : parserpt+1] //line lang.y:70 { var result []ast.Node - if c, ok := parserS[parserpt-1].node.(*ast.Concat); ok { - result = append(c.Exprs, parserS[parserpt-0].node) + if c, ok := parserDollar[1].node.(*ast.Concat); ok { + result = append(c.Exprs, parserDollar[2].node) } else { - result = []ast.Node{parserS[parserpt-1].node, parserS[parserpt-0].node} + result = []ast.Node{parserDollar[1].node, parserDollar[2].node} } parserVAL.node = &ast.Concat{ @@ -395,89 +517,103 @@ parserdefault: } } case 5: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:86 { - parserVAL.node = parserS[parserpt-0].node + parserVAL.node = parserDollar[1].node } case 6: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:90 { - parserVAL.node = parserS[parserpt-0].node + parserVAL.node = parserDollar[1].node } case 7: + parserDollar = parserS[parserpt-3 : parserpt+1] //line lang.y:96 { - parserVAL.node = parserS[parserpt-1].node + parserVAL.node = parserDollar[2].node } case 8: + parserDollar = parserS[parserpt-3 : parserpt+1] //line lang.y:102 { - parserVAL.node = parserS[parserpt-1].node + parserVAL.node = parserDollar[2].node } case 9: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:106 { - parserVAL.node = parserS[parserpt-0].node + parserVAL.node = 
parserDollar[1].node } case 10: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:110 { parserVAL.node = &ast.LiteralNode{ - Value: parserS[parserpt-0].token.Value.(int), + Value: parserDollar[1].token.Value.(int), Typex: ast.TypeInt, - Posx: parserS[parserpt-0].token.Pos, + Posx: parserDollar[1].token.Pos, } } case 11: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:118 { parserVAL.node = &ast.LiteralNode{ - Value: parserS[parserpt-0].token.Value.(float64), + Value: parserDollar[1].token.Value.(float64), Typex: ast.TypeFloat, - Posx: parserS[parserpt-0].token.Pos, + Posx: parserDollar[1].token.Pos, } } case 12: + parserDollar = parserS[parserpt-3 : parserpt+1] //line lang.y:126 { parserVAL.node = &ast.Arithmetic{ - Op: parserS[parserpt-1].token.Value.(ast.ArithmeticOp), - Exprs: []ast.Node{parserS[parserpt-2].node, parserS[parserpt-0].node}, - Posx: parserS[parserpt-2].node.Pos(), + Op: parserDollar[2].token.Value.(ast.ArithmeticOp), + Exprs: []ast.Node{parserDollar[1].node, parserDollar[3].node}, + Posx: parserDollar[1].node.Pos(), } } case 13: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:134 { - parserVAL.node = &ast.VariableAccess{Name: parserS[parserpt-0].token.Value.(string), Posx: parserS[parserpt-0].token.Pos} + parserVAL.node = &ast.VariableAccess{Name: parserDollar[1].token.Value.(string), Posx: parserDollar[1].token.Pos} } case 14: + parserDollar = parserS[parserpt-4 : parserpt+1] //line lang.y:138 { - parserVAL.node = &ast.Call{Func: parserS[parserpt-3].token.Value.(string), Args: parserS[parserpt-1].nodeList, Posx: parserS[parserpt-3].token.Pos} + parserVAL.node = &ast.Call{Func: parserDollar[1].token.Value.(string), Args: parserDollar[3].nodeList, Posx: parserDollar[1].token.Pos} } case 15: + parserDollar = parserS[parserpt-0 : parserpt+1] //line lang.y:143 { parserVAL.nodeList = nil } case 16: + parserDollar = parserS[parserpt-3 : parserpt+1] //line lang.y:147 { - parserVAL.nodeList = 
append(parserS[parserpt-2].nodeList, parserS[parserpt-0].node) + parserVAL.nodeList = append(parserDollar[1].nodeList, parserDollar[3].node) } case 17: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:151 { - parserVAL.nodeList = append(parserVAL.nodeList, parserS[parserpt-0].node) + parserVAL.nodeList = append(parserVAL.nodeList, parserDollar[1].node) } case 18: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:157 { parserVAL.node = &ast.LiteralNode{ - Value: parserS[parserpt-0].token.Value.(string), + Value: parserDollar[1].token.Value.(string), Typex: ast.TypeString, - Posx: parserS[parserpt-0].token.Pos, + Posx: parserDollar[1].token.Pos, } } } diff --git a/config/module/copy_dir.go b/config/module/copy_dir.go new file mode 100644 index 000000000..f2ae63b77 --- /dev/null +++ b/config/module/copy_dir.go @@ -0,0 +1,76 @@ +package module + +import ( + "io" + "os" + "path/filepath" + "strings" +) + +// copyDir copies the src directory contents into dst. Both directories +// should already exist. +func copyDir(dst, src string) error { + src, err := filepath.EvalSymlinks(src) + if err != nil { + return err + } + + walkFn := func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if path == src { + return nil + } + + if strings.HasPrefix(filepath.Base(path), ".") { + // Skip any dot files + if info.IsDir() { + return filepath.SkipDir + } else { + return nil + } + } + + // The "path" has the src prefixed to it. We need to join our + // destination with the path without the src on it. + dstPath := filepath.Join(dst, path[len(src):]) + + // If we have a directory, make that subdirectory, then continue + // the walk. + if info.IsDir() { + if path == filepath.Join(src, dst) { + // dst is in src; don't walk it. + return nil + } + + if err := os.MkdirAll(dstPath, 0755); err != nil { + return err + } + + return nil + } + + // If we have a file, copy the contents. 
+ srcF, err := os.Open(path) + if err != nil { + return err + } + defer srcF.Close() + + dstF, err := os.Create(dstPath) + if err != nil { + return err + } + defer dstF.Close() + + if _, err := io.Copy(dstF, srcF); err != nil { + return err + } + + // Chmod it + return os.Chmod(dstPath, info.Mode()) + } + + return filepath.Walk(src, walkFn) +} diff --git a/config/module/get.go b/config/module/get.go index 3820e65f2..cba15277f 100644 --- a/config/module/get.go +++ b/config/module/get.go @@ -1,6 +1,9 @@ package module import ( + "io/ioutil" + "os" + "github.com/hashicorp/go-getter" ) @@ -23,6 +26,36 @@ const ( GetModeUpdate ) +// GetCopy is the same as Get except that it downloads a copy of the +// module represented by source. +// +// This copy will omit and dot-prefixed files (such as .git/, .hg/) and +// can't be updated on its own. +func GetCopy(dst, src string) error { + // Create the temporary directory to do the real Get to + tmpDir, err := ioutil.TempDir("", "tf") + if err != nil { + return err + } + if err := os.RemoveAll(tmpDir); err != nil { + return err + } + defer os.RemoveAll(tmpDir) + + // Get to that temporary dir + if err := getter.Get(tmpDir, src); err != nil { + return err + } + + // Make sure the destination exists + if err := os.MkdirAll(dst, 0755); err != nil { + return err + } + + // Copy to the final location + return copyDir(dst, tmpDir) +} + func getStorage(s getter.Storage, key string, src string, mode GetMode) (string, bool, error) { // Get the module with the level specified if we were told to. 
if mode > GetModeNone { diff --git a/helper/resource/testing.go b/helper/resource/testing.go index eaa0cbf71..0b53c3c61 100644 --- a/helper/resource/testing.go +++ b/helper/resource/testing.go @@ -11,6 +11,7 @@ import ( "strings" "testing" + "github.com/hashicorp/go-getter" "github.com/hashicorp/terraform/config/module" "github.com/hashicorp/terraform/terraform" ) @@ -198,7 +199,7 @@ func testStep( } // Load the modules - modStorage := &module.FolderStorage{ + modStorage := &getter.FolderStorage{ StorageDir: filepath.Join(cfgPath, ".tfmodules"), } err = mod.Load(modStorage, module.GetModeGet) From 263cc1b8553545682316894ea8382c7d724a81ee Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 15 Oct 2015 13:52:27 -0700 Subject: [PATCH 028/100] terraform: final failing test --- terraform/terraform_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/terraform/terraform_test.go b/terraform/terraform_test.go index 02d4de2a2..d17726acb 100644 --- a/terraform/terraform_test.go +++ b/terraform/terraform_test.go @@ -13,6 +13,7 @@ import ( "sync" "testing" + "github.com/hashicorp/go-getter" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/config/module" ) @@ -70,7 +71,7 @@ func testModule(t *testing.T, name string) *module.Tree { t.Fatalf("err: %s", err) } - s := &module.FolderStorage{StorageDir: tempDir(t)} + s := &getter.FolderStorage{StorageDir: tempDir(t)} if err := mod.Load(s, module.GetModeGet); err != nil { t.Fatalf("err: %s", err) } From 05007bed38db92072c492c057a2b4613d59022f9 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Fri, 16 Oct 2015 09:11:39 -0500 Subject: [PATCH 029/100] Update CHANGELOG.md --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 81316bf54..a61584785 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ ## 0.6.5 (Unreleased) +INTERNAL IMPROVEMENTS: + + * provider/digitalocean: use official Go client [GH-3333] + ## 0.6.4 (October 15, 
2015) FEATURES: From 347f9c0bea68722a85a1e453c69e6a756043f6b8 Mon Sep 17 00:00:00 2001 From: clint shryock Date: Fri, 16 Oct 2015 14:00:23 -0500 Subject: [PATCH 030/100] vagrantfile: update base image name to Bento, from Chef --- Vagrantfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Vagrantfile b/Vagrantfile index 8a936e04c..59709339d 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -42,7 +42,7 @@ source /etc/profile.d/gopath.sh SCRIPT Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| - config.vm.box = "chef/ubuntu-12.04" + config.vm.box = "bento/ubuntu-12.04" config.vm.hostname = "terraform" config.vm.provision "shell", inline: $script, privileged: false From c2fdb7171e4d84cc3f37b8e2163b9d43aa6306e5 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 17 Oct 2015 17:33:45 -0700 Subject: [PATCH 031/100] use upstream osext, which fixes some bugs --- config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config.go b/config.go index 648223888..c9b2a7f75 100644 --- a/config.go +++ b/config.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/hcl" "github.com/hashicorp/terraform/plugin" "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/osext" + "github.com/kardianos/osext" ) // Config is the structure of the configuration for the Terraform CLI. 
From 593077161589a9ba920324450d0634d93834b20e Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Sun, 18 Oct 2015 13:21:41 -0400 Subject: [PATCH 032/100] Update compute_instance.html.markdown Make it clear that you can't have two networks --- .../docs/providers/google/r/compute_instance.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/source/docs/providers/google/r/compute_instance.html.markdown b/website/source/docs/providers/google/r/compute_instance.html.markdown index bf8add9e6..938bc71df 100644 --- a/website/source/docs/providers/google/r/compute_instance.html.markdown +++ b/website/source/docs/providers/google/r/compute_instance.html.markdown @@ -82,8 +82,8 @@ The following arguments are supported: are not allowed to be used simultaneously. * `network_interface` - (Required) Networks to attach to the instance. This can be - specified multiple times for multiple networks. Structure is documented - below. + specified multiple times for multiple networks, but GCE is currently limited + to just 1. Structure is documented below. * `network` - (DEPRECATED, Required) Networks to attach to the instance. This can be specified multiple times for multiple networks. 
Structure is documented From bb51882f337e80ca85b75069155ed03c52636dd3 Mon Sep 17 00:00:00 2001 From: Nathan Zadoks Date: Mon, 12 Oct 2015 17:04:58 -0400 Subject: [PATCH 033/100] Etcd remote state backend --- state/remote/etcd.go | 78 +++++++++++++++++++++++++++++++++++++++ state/remote/etcd_test.go | 38 +++++++++++++++++++ state/remote/remote.go | 1 + 3 files changed, 117 insertions(+) create mode 100644 state/remote/etcd.go create mode 100644 state/remote/etcd_test.go diff --git a/state/remote/etcd.go b/state/remote/etcd.go new file mode 100644 index 000000000..f596a8492 --- /dev/null +++ b/state/remote/etcd.go @@ -0,0 +1,78 @@ +package remote + +import ( + "crypto/md5" + "fmt" + "strings" + + "github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context" + etcdapi "github.com/coreos/etcd/client" +) + +func etcdFactory(conf map[string]string) (Client, error) { + path, ok := conf["path"] + if !ok { + return nil, fmt.Errorf("missing 'path' configuration") + } + + endpoints, ok := conf["endpoints"] + if !ok || endpoints == "" { + return nil, fmt.Errorf("missing 'endpoints' configuration") + } + + config := etcdapi.Config{ + Endpoints: strings.Split(endpoints, " "), + } + if username, ok := conf["username"]; ok && username != "" { + config.Username = username + } + if password, ok := conf["password"]; ok && password != "" { + config.Password = password + } + + client, err := etcdapi.New(config) + if err != nil { + return nil, err + } + + return &EtcdClient{ + Client: client, + Path: path, + }, nil +} + +// EtcdClient is a remote client that stores data in etcd. 
+type EtcdClient struct { + Client etcdapi.Client + Path string +} + +func (c *EtcdClient) Get() (*Payload, error) { + resp, err := etcdapi.NewKeysAPI(c.Client).Get(context.Background(), c.Path, &etcdapi.GetOptions{Quorum: true}) + if err != nil { + if err, ok := err.(etcdapi.Error); ok && err.Code == etcdapi.ErrorCodeKeyNotFound { + return nil, nil + } + return nil, err + } + if resp.Node.Dir { + return nil, fmt.Errorf("path is a directory") + } + + data := []byte(resp.Node.Value) + md5 := md5.Sum(data) + return &Payload{ + Data: data, + MD5: md5[:], + }, nil +} + +func (c *EtcdClient) Put(data []byte) error { + _, err := etcdapi.NewKeysAPI(c.Client).Set(context.Background(), c.Path, string(data), nil) + return err +} + +func (c *EtcdClient) Delete() error { + _, err := etcdapi.NewKeysAPI(c.Client).Delete(context.Background(), c.Path, nil) + return err +} diff --git a/state/remote/etcd_test.go b/state/remote/etcd_test.go new file mode 100644 index 000000000..6d06d801b --- /dev/null +++ b/state/remote/etcd_test.go @@ -0,0 +1,38 @@ +package remote + +import ( + "fmt" + "os" + "testing" + "time" +) + +func TestEtcdClient_impl(t *testing.T) { + var _ Client = new(EtcdClient) +} + +func TestEtcdClient(t *testing.T) { + endpoint := os.Getenv("ETCD_ENDPOINT") + if endpoint == "" { + t.Skipf("skipping; ETCD_ENDPOINT must be set") + } + + config := map[string]string{ + "endpoints": endpoint, + "path": fmt.Sprintf("tf-unit/%s", time.Now().String()), + } + + if username := os.Getenv("ETCD_USERNAME"); username != "" { + config["username"] = username + } + if password := os.Getenv("ETCD_PASSWORD"); password != "" { + config["password"] = password + } + + client, err := etcdFactory(config) + if err != nil { + t.Fatalf("Error for valid config: %s", err) + } + + testClient(t, client) +} diff --git a/state/remote/remote.go b/state/remote/remote.go index 7ebea3222..5337ad7b7 100644 --- a/state/remote/remote.go +++ b/state/remote/remote.go @@ -38,6 +38,7 @@ func NewClient(t string, 
conf map[string]string) (Client, error) { var BuiltinClients = map[string]Factory{ "atlas": atlasFactory, "consul": consulFactory, + "etcd": etcdFactory, "http": httpFactory, "s3": s3Factory, "swift": swiftFactory, From 362a2035c0bb709162ab75d47ef6db2b23bcef56 Mon Sep 17 00:00:00 2001 From: Nathan Zadoks Date: Thu, 15 Oct 2015 22:32:59 -0400 Subject: [PATCH 034/100] Document the etcd remote state backend --- website/source/docs/commands/remote-config.html.markdown | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/website/source/docs/commands/remote-config.html.markdown b/website/source/docs/commands/remote-config.html.markdown index 73a06f821..aaa4148fc 100644 --- a/website/source/docs/commands/remote-config.html.markdown +++ b/website/source/docs/commands/remote-config.html.markdown @@ -50,6 +50,11 @@ The following backends are supported: variables can optionally be provided. Address is assumed to be the local agent if not provided. +* Etcd - Stores the state in etcd at a given path. + Requires the `path` and `endpoints` variables. The `username` and `password` + variables can optionally be provided. `endpoints` is assumed to be a + space-separated list of etcd endpoints. + * S3 - Stores the state as a given key in a given bucket on Amazon S3. Requires the `bucket` and `key` variables. Supports and honors the standard AWS environment variables `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` From 3021069207bf24c8c66f2877fd5d6eafea0d2c02 Mon Sep 17 00:00:00 2001 From: David Adams Date: Sun, 18 Oct 2015 18:23:13 -0500 Subject: [PATCH 035/100] Update init and remote config command docs * Update init docs to be correct, and provide an example. * Update remote config docs to provide more details about the Consul backend and to provide another example. 
--- .../source/docs/commands/init.html.markdown | 37 ++++++++++++++----- .../docs/commands/remote-config.html.markdown | 16 ++++++-- 2 files changed, 39 insertions(+), 14 deletions(-) diff --git a/website/source/docs/commands/init.html.markdown b/website/source/docs/commands/init.html.markdown index ee4286c27..803d937d7 100644 --- a/website/source/docs/commands/init.html.markdown +++ b/website/source/docs/commands/init.html.markdown @@ -31,17 +31,34 @@ a remote state configuration if provided. The command-line flags are all optional. The list of available flags are: -* `-address=url` - URL of the remote storage server. Required for HTTP backend, - optional for Atlas and Consul. - -* `-access-token=token` - Authentication token for state storage server. - Required for Atlas backend, optional for Consul. - * `-backend=atlas` - Specifies the type of remote backend. Must be one - of Atlas, Consul, or HTTP. Defaults to atlas. + of Atlas, Consul, S3, or HTTP. Defaults to Atlas. -* `-name=name` - Name of the state file in the state storage server. - Required for Atlas backend. +* `-backend-config="k=v"` - Specify a configuration variable for a backend. This is how you set the required variables for the selected backend (as detailed in the [remote command documentation](/docs/commands/remote.html)). -* `-path=path` - Path of the remote state in Consul. Required for the Consul backend. 
+## Example: Consul + +This example will initialize the current directory and configure Consul remote storage: + +``` +$ terraform init \ + -backend=consul \ + -backend-config="address=your.consul.endpoint:443" \ + -backend-config="scheme=https" \ + -backend-config="path=tf/path/for/project" \ + /path/to/source/module +``` + +## Example: S3 + +This example will initialize the current directory and configure S3 remote storage: + +``` +$ terraform init \ + -backend=s3 \ + -backend-config="bucket=your-s3-bucket" \ + -backend-config="key=tf/path/for/project.json" \ + -backend-config="acl=bucket-owner-full-control" \ + /path/to/source/module +``` diff --git a/website/source/docs/commands/remote-config.html.markdown b/website/source/docs/commands/remote-config.html.markdown index 73a06f821..cd6f8f9d3 100644 --- a/website/source/docs/commands/remote-config.html.markdown +++ b/website/source/docs/commands/remote-config.html.markdown @@ -45,10 +45,18 @@ The following backends are supported: * Atlas - Stores the state in Atlas. Requires the `name` and `access_token` variables. The `address` variable can optionally be provided. -* Consul - Stores the state in the KV store at a given path. - Requires the `path` variable. The `address` and `access_token` - variables can optionally be provided. Address is assumed to be the - local agent if not provided. +* Consul - Stores the state in the KV store at a given path. Requires the + `path` variable. Supports the `CONSUL_HTTP_TOKEN` environment variable + for specifying access credentials, or the `access_token` variable may + be provided, but this is not recommended since it would be included in + cleartext inside the persisted, shared state. Other supported parameters + include: + * `address` - DNS name and port of your Consul endpoint specified in the + format `dnsname:port`. Defaults to the local agent HTTP listener. This + may also be specified using the `CONSUL_HTTP_ADDR` environment variable. 
+ * `scheme` - Specifies what protocol to use when talking to the given + `address`, either `http` or `https`. SSL support can also be triggered + by setting the environment variable `CONSUL_HTTP_SSL` to `true`. * S3 - Stores the state as a given key in a given bucket on Amazon S3. Requires the `bucket` and `key` variables. Supports and honors the standard From 3c0ed11922f17bb48ac14c76e3242070d3b82f2e Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Fri, 16 Oct 2015 17:17:35 -0400 Subject: [PATCH 036/100] Remove usage of http.DefaultClient --- state/remote/atlas.go | 9 ++++++--- state/remote/http_test.go | 2 +- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/state/remote/atlas.go b/state/remote/atlas.go index f52d834a2..2c2c48895 100644 --- a/state/remote/atlas.go +++ b/state/remote/atlas.go @@ -83,7 +83,8 @@ func (c *AtlasClient) Get() (*Payload, error) { } // Request the url - resp, err := http.DefaultClient.Do(req) + client := &http.Client{} + resp, err := client.Do(req) if err != nil { return nil, err } @@ -161,7 +162,8 @@ func (c *AtlasClient) Put(state []byte) error { req.ContentLength = int64(len(state)) // Make the request - resp, err := http.DefaultClient.Do(req) + client := &http.Client{} + resp, err := client.Do(req) if err != nil { return fmt.Errorf("Failed to upload state: %v", err) } @@ -186,7 +188,8 @@ func (c *AtlasClient) Delete() error { } // Make the request - resp, err := http.DefaultClient.Do(req) + client := &http.Client{} + resp, err := client.Do(req) if err != nil { return fmt.Errorf("Failed to delete state: %v", err) } diff --git a/state/remote/http_test.go b/state/remote/http_test.go index e6e7297c1..74ed1755a 100644 --- a/state/remote/http_test.go +++ b/state/remote/http_test.go @@ -24,7 +24,7 @@ func TestHTTPClient(t *testing.T) { t.Fatalf("err: %s", err) } - client := &HTTPClient{URL: url, Client: http.DefaultClient} + client := &HTTPClient{URL: url, Client: &http.Client{}} testClient(t, client) } From 
b0ceffc322efabc3ad2ff4bf41090eab25053bbe Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Mon, 19 Oct 2015 12:04:10 -0400 Subject: [PATCH 037/100] Remove usage from dependencies as well. Other dependencies need upstream merging to completely solve this. --- builtin/providers/aws/config.go | 3 +++ builtin/providers/dme/config.go | 6 +++++- state/remote/s3.go | 2 ++ 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go index f8f443b73..8b9428fbc 100644 --- a/builtin/providers/aws/config.go +++ b/builtin/providers/aws/config.go @@ -3,6 +3,7 @@ package aws import ( "fmt" "log" + "net/http" "strings" "github.com/hashicorp/go-multierror" @@ -98,6 +99,7 @@ func (c *Config) Client() (interface{}, error) { Credentials: creds, Region: aws.String(c.Region), MaxRetries: aws.Int(c.MaxRetries), + HTTPClient: &http.Client{}, } log.Println("[INFO] Initializing IAM Connection") @@ -123,6 +125,7 @@ func (c *Config) Client() (interface{}, error) { Credentials: creds, Region: aws.String("us-east-1"), MaxRetries: aws.Int(c.MaxRetries), + HTTPClient: &http.Client{}, } log.Println("[INFO] Initializing DynamoDB connection") diff --git a/builtin/providers/dme/config.go b/builtin/providers/dme/config.go index 514df0d10..2d387673f 100644 --- a/builtin/providers/dme/config.go +++ b/builtin/providers/dme/config.go @@ -2,8 +2,10 @@ package dme import ( "fmt" - "github.com/soniah/dnsmadeeasy" "log" + "net/http" + + "github.com/soniah/dnsmadeeasy" ) // Config contains DNSMadeEasy provider settings @@ -20,6 +22,8 @@ func (c *Config) Client() (*dnsmadeeasy.Client, error) { return nil, fmt.Errorf("Error setting up client: %s", err) } + client.HTTP = &http.Client{} + if c.UseSandbox { client.URL = dnsmadeeasy.SandboxURL } diff --git a/state/remote/s3.go b/state/remote/s3.go index bdc6a63cf..f6cfdfbde 100644 --- a/state/remote/s3.go +++ b/state/remote/s3.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "log" + "net/http" "os" "strconv" 
@@ -75,6 +76,7 @@ func s3Factory(conf map[string]string) (Client, error) { awsConfig := &aws.Config{ Credentials: credentialsProvider, Region: aws.String(regionName), + HTTPClient: &http.Client{}, } nativeClient := s3.New(awsConfig) From 5fa5c4bc535c7798b0ec792e02dda4495e0854bc Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Mon, 19 Oct 2015 13:03:28 -0400 Subject: [PATCH 038/100] Use new packngo API allowing passing in a custom http.Client --- builtin/providers/packet/config.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/builtin/providers/packet/config.go b/builtin/providers/packet/config.go index 659ee9ebc..b7d408c62 100644 --- a/builtin/providers/packet/config.go +++ b/builtin/providers/packet/config.go @@ -1,6 +1,8 @@ package packet import ( + "net/http" + "github.com/packethost/packngo" ) @@ -14,5 +16,5 @@ type Config struct { // Client() returns a new client for accessing packet. func (c *Config) Client() *packngo.Client { - return packngo.NewClient(consumerToken, c.AuthToken) + return packngo.NewClient(consumerToken, c.AuthToken, &http.Client{}) } From 7a24da8c94733b474955fb8acd79ef8fc56f92f8 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Mon, 19 Oct 2015 18:43:49 -0700 Subject: [PATCH 039/100] Update CHANGELOG.md --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a61584785..007dedfcd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ ## 0.6.5 (Unreleased) +FEATURES: + + * New remote state backend: `etcd` [GH-3487] + INTERNAL IMPROVEMENTS: * provider/digitalocean: use official Go client [GH-3333] From fca44bdec3a1510a413813124b074cfd2ea08829 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Tue, 20 Oct 2015 12:28:12 -0500 Subject: [PATCH 040/100] core: state metadata difference should bump serial Remote state includes MD5-based checksumming to protect against State conflicts. This can generate improper conflicts with states that differ only in their Schema version. 
We began to see this issue with https://github.com/hashicorp/terraform/pull/3470 which changes the "schema_version" of aws_key_pairs. --- terraform/state.go | 15 +++++++++ terraform/state_test.go | 72 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 87 insertions(+) diff --git a/terraform/state.go b/terraform/state.go index 21b2c04de..e97e0c27c 100644 --- a/terraform/state.go +++ b/terraform/state.go @@ -965,6 +965,21 @@ func (s *InstanceState) Equal(other *InstanceState) bool { } } + // Meta must be equal + if len(s.Meta) != len(other.Meta) { + return false + } + for k, v := range s.Meta { + otherV, ok := other.Meta[k] + if !ok { + return false + } + + if v != otherV { + return false + } + } + return true } diff --git a/terraform/state_test.go b/terraform/state_test.go index eeb974d0b..cc7b91bbc 100644 --- a/terraform/state_test.go +++ b/terraform/state_test.go @@ -188,6 +188,43 @@ func TestStateEqual(t *testing.T) { }, }, }, + + // Meta differs + { + false, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Primary: &InstanceState{ + Meta: map[string]string{ + "schema_version": "1", + }, + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Primary: &InstanceState{ + Meta: map[string]string{ + "schema_version": "2", + }, + }, + }, + }, + }, + }, + }, + }, } for i, tc := range cases { @@ -224,6 +261,41 @@ func TestStateIncrementSerialMaybe(t *testing.T) { }, 1, }, + "S2 is different, but only via Instance Metadata": { + &State{ + Serial: 3, + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Primary: &InstanceState{ + Meta: map[string]string{}, + }, + }, + }, + }, + }, + }, + &State{ + Serial: 3, + Modules: []*ModuleState{ 
+ &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Primary: &InstanceState{ + Meta: map[string]string{ + "schema_version": "1", + }, + }, + }, + }, + }, + }, + }, + 4, + }, "S1 serial is higher": { &State{Serial: 5}, &State{ From d4f7cdc877721880c46c6d8bff726613532522b2 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Mon, 19 Oct 2015 15:38:23 -0400 Subject: [PATCH 041/100] GCP UserAgent now shows accurate Terraform version --- builtin/providers/google/config.go | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/builtin/providers/google/config.go b/builtin/providers/google/config.go index 6bfa3553d..1198a7c0a 100644 --- a/builtin/providers/google/config.go +++ b/builtin/providers/google/config.go @@ -10,8 +10,7 @@ import ( "runtime" "strings" - // TODO(dcunnin): Use version code from version.go - // "github.com/hashicorp/terraform" + "github.com/hashicorp/terraform/terraform" "golang.org/x/oauth2" "golang.org/x/oauth2/google" "golang.org/x/oauth2/jwt" @@ -117,13 +116,11 @@ func (c *Config) loadAndValidate() error { } - // Build UserAgent - versionString := "0.0.0" - // TODO(dcunnin): Use Terraform's version code from version.go - // versionString := main.Version - // if main.VersionPrerelease != "" { - // versionString = fmt.Sprintf("%s-%s", versionString, main.VersionPrerelease) - // } + versionString := terraform.Version + prerelease := terraform.VersionPrerelease + if len(prerelease) > 0 { + versionString = fmt.Sprintf("%s-%s", versionString, prerelease) + } userAgent := fmt.Sprintf( "(%s %s) Terraform/%s", runtime.GOOS, runtime.GOARCH, versionString) From bba2c3221d4b16e1aa8fe26e4c6cba2b4e318380 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Mon, 19 Oct 2015 15:27:41 -0400 Subject: [PATCH 042/100] Added oauth2 support for GCP --- builtin/providers/google/config.go | 28 ++++++++++++---------------- builtin/providers/google/provider.go | 6 +++++- 2 files 
changed, 17 insertions(+), 17 deletions(-) diff --git a/builtin/providers/google/config.go b/builtin/providers/google/config.go index 6bfa3553d..120c578e1 100644 --- a/builtin/providers/google/config.go +++ b/builtin/providers/google/config.go @@ -36,6 +36,13 @@ type Config struct { func (c *Config) loadAndValidate() error { var account accountFile + clientScopes := []string{ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite", + "https://www.googleapis.com/auth/devstorage.full_control", + } + if c.AccountFile == "" { c.AccountFile = os.Getenv("GOOGLE_ACCOUNT_FILE") @@ -79,13 +86,6 @@ func (c *Config) loadAndValidate() error { } } - clientScopes := []string{ - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/ndev.clouddns.readwrite", - "https://www.googleapis.com/auth/devstorage.full_control", - } - // Get the token for use in our requests log.Printf("[INFO] Requesting Google token...") log.Printf("[INFO] -- Email: %s", account.ClientEmail) @@ -105,16 +105,12 @@ func (c *Config) loadAndValidate() error { client = conf.Client(oauth2.NoContext) } else { - log.Printf("[INFO] Requesting Google token via GCE Service Role...") - client = &http.Client{ - Transport: &oauth2.Transport{ - // Fetch from Google Compute Engine's metadata server to retrieve - // an access token for the provided account. - // If no account is specified, "default" is used. - Source: google.ComputeTokenSource(""), - }, + log.Printf("[INFO] Authenticating using DefaultClient"); + err := error(nil) + client, err = google.DefaultClient(oauth2.NoContext, clientScopes...) 
+ if err != nil { + return err } - } // Build UserAgent diff --git a/builtin/providers/google/provider.go b/builtin/providers/google/provider.go index 7c9587219..acafd851c 100644 --- a/builtin/providers/google/provider.go +++ b/builtin/providers/google/provider.go @@ -15,7 +15,7 @@ func Provider() terraform.ResourceProvider { Schema: map[string]*schema.Schema{ "account_file": &schema.Schema{ Type: schema.TypeString, - Required: true, + Optional: true, DefaultFunc: schema.EnvDefaultFunc("GOOGLE_ACCOUNT_FILE", nil), ValidateFunc: validateAccountFile, }, @@ -78,6 +78,10 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) { } func validateAccountFile(v interface{}, k string) (warnings []string, errors []error) { + if v == nil { + return + } + value := v.(string) if value == "" { From 05c0998d2d84d7b06743761e9858a17470935cdb Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Tue, 20 Oct 2015 14:33:28 -0500 Subject: [PATCH 043/100] core: store deeply nested modules in a consistent order in the state We were only comparing the last element of the module, which meant that deeply nested modules with the same name but different ancestry had an undefined sort order, which could cause inconsistencies in state storage and potentially break remote state MD5 checksumming. 
--- terraform/state.go | 5 ++--- terraform/state_test.go | 17 +++++++++++++++++ 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/terraform/state.go b/terraform/state.go index e97e0c27c..8734cfc17 100644 --- a/terraform/state.go +++ b/terraform/state.go @@ -1207,9 +1207,8 @@ func (s moduleStateSort) Less(i, j int) bool { return len(a.Path) < len(b.Path) } - // Otherwise, compare by last path element - idx := len(a.Path) - 1 - return a.Path[idx] < b.Path[idx] + // Otherwise, compare lexically + return strings.Join(a.Path, ".") < strings.Join(b.Path, ".") } func (s moduleStateSort) Swap(i, j int) { diff --git a/terraform/state_test.go b/terraform/state_test.go index cc7b91bbc..8d24a8e75 100644 --- a/terraform/state_test.go +++ b/terraform/state_test.go @@ -40,6 +40,23 @@ func TestStateAddModule(t *testing.T) { []string{"root", "foo", "bar"}, }, }, + // Same last element, different middle element + { + [][]string{ + []string{"root", "foo", "bar"}, // This one should sort after... + []string{"root", "foo"}, + []string{"root"}, + []string{"root", "bar", "bar"}, // ...this one. + []string{"root", "bar"}, + }, + [][]string{ + []string{"root"}, + []string{"root", "bar"}, + []string{"root", "foo"}, + []string{"root", "bar", "bar"}, + []string{"root", "foo", "bar"}, + }, + }, } for _, tc := range cases { From e59fb4e6ca2e6c184acbdc3c7e14d07f0b2e0a83 Mon Sep 17 00:00:00 2001 From: Christopher Tiwald Date: Sun, 19 Jul 2015 00:09:00 -0400 Subject: [PATCH 044/100] aws: Add support for "aws_codedeploy_app" resources. 
--- builtin/providers/aws/config.go | 5 + builtin/providers/aws/provider.go | 1 + .../aws/resource_aws_codedeploy_app.go | 127 ++++++++++++++++++ 3 files changed, 133 insertions(+) create mode 100644 builtin/providers/aws/resource_aws_codedeploy_app.go diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go index 8b9428fbc..dfd8b1b2e 100644 --- a/builtin/providers/aws/config.go +++ b/builtin/providers/aws/config.go @@ -14,6 +14,7 @@ import ( "github.com/aws/aws-sdk-go/service/autoscaling" "github.com/aws/aws-sdk-go/service/cloudwatch" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + "github.com/aws/aws-sdk-go/service/codedeploy" "github.com/aws/aws-sdk-go/service/directoryservice" "github.com/aws/aws-sdk-go/service/dynamodb" "github.com/aws/aws-sdk-go/service/ec2" @@ -70,6 +71,7 @@ type AWSClient struct { lambdaconn *lambda.Lambda opsworksconn *opsworks.OpsWorks glacierconn *glacier.Glacier + codedeployconn *codedeploy.CodeDeploy } // Client configures and returns a fully initialized AWSClient @@ -192,6 +194,9 @@ func (c *Config) Client() (interface{}, error) { log.Println("[INFO] Initializing Glacier connection") client.glacierconn = glacier.New(awsConfig) + + log.Println("[INFO] Initializing CodeDeploy Connection") + client.codedeployconn = codedeploy.New(awsConfig) } if len(errs) > 0 { diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index f73580d0f..132fa4678 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -166,6 +166,7 @@ func Provider() terraform.ResourceProvider { "aws_cloudwatch_log_group": resourceAwsCloudWatchLogGroup(), "aws_autoscaling_lifecycle_hook": resourceAwsAutoscalingLifecycleHook(), "aws_cloudwatch_metric_alarm": resourceAwsCloudWatchMetricAlarm(), + "aws_codedeploy_app": resourceAwsCodeDeployApp(), "aws_customer_gateway": resourceAwsCustomerGateway(), "aws_db_instance": resourceAwsDbInstance(), "aws_db_parameter_group": 
resourceAwsDbParameterGroup(), diff --git a/builtin/providers/aws/resource_aws_codedeploy_app.go b/builtin/providers/aws/resource_aws_codedeploy_app.go new file mode 100644 index 000000000..ccf07a82d --- /dev/null +++ b/builtin/providers/aws/resource_aws_codedeploy_app.go @@ -0,0 +1,127 @@ +package aws + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/codedeploy" +) + +func resourceAwsCodeDeployApp() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsCodeDeployAppCreate, + Read: resourceAwsCodeDeployAppRead, + Update: resourceAwsCodeDeployUpdate, + Delete: resourceAwsCodeDeployAppDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + // The unique ID is set by AWS on create. + "unique_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + } +} + +func resourceAwsCodeDeployAppCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codedeployconn + + application := d.Get("name").(string) + log.Printf("[DEBUG] Creating CodeDeploy application %s", application) + + resp, err := conn.CreateApplication(&codedeploy.CreateApplicationInput{ + ApplicationName: aws.String(application), + }) + if err != nil { + return err + } + log.Printf("[DEBUG] CodeDeploy application %s created", *resp.ApplicationId) + + // Despite giving the application a unique ID, AWS doesn't actually use + // it in API calls. Use it and the app name to identify the resource in + // the state file. This allows us to reliably detect both when the TF + // config file changes and when the user deletes the app without removing + // it first from the TF config. 
+ d.SetId(fmt.Sprintf("%s:%s", *resp.ApplicationId, application)) + + return resourceAwsCodeDeployAppRead(d, meta) +} + +func resourceAwsCodeDeployAppRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codedeployconn + + _, application := resourceAwsCodeDeployAppParseId(d.Id()) + log.Printf("[DEBUG] Reading CodeDeploy application %s", application) + resp, err := conn.GetApplication(&codedeploy.GetApplicationInput{ + ApplicationName: aws.String(application), + }) + if err != nil { + if codedeployerr, ok := err.(awserr.Error); ok && codedeployerr.Code() == "ApplicationDoesNotExistException" { + d.SetId("") + return nil + } else { + log.Printf("[ERROR] Error finding CodeDeploy application: %s", err) + return err + } + } + + d.Set("name", *resp.Application.ApplicationName) + + return nil +} + +func resourceAwsCodeDeployUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codedeployconn + + o, n := d.GetChange("name") + + _, err := conn.UpdateApplication(&codedeploy.UpdateApplicationInput{ + ApplicationName: aws.String(o.(string)), + NewApplicationName: aws.String(n.(string)), + }) + if err != nil { + return err + } + log.Printf("[DEBUG] CodeDeploy application %s updated", n) + + d.Set("name", n) + + return nil +} + +func resourceAwsCodeDeployAppDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codedeployconn + + _, err := conn.DeleteApplication(&codedeploy.DeleteApplicationInput{ + ApplicationName: aws.String(d.Get("name").(string)), + }) + if err != nil { + if cderr, ok := err.(awserr.Error); ok && cderr.Code() == "InvalidApplicationNameException" { + d.SetId("") + return nil + } else { + log.Printf("[ERROR] Error deleting CodeDeploy application: %s", err) + return err + } + } + + return nil +} + +func resourceAwsCodeDeployAppParseId(id string) (string, string) { + parts := strings.SplitN(id, ":", 2) + return parts[0], parts[1] +} From 
42c077700a26b4cf65999db199030ac88bc68d14 Mon Sep 17 00:00:00 2001 From: Christopher Tiwald Date: Sun, 19 Jul 2015 00:09:25 -0400 Subject: [PATCH 045/100] aws: Add acceptance tests for "aws_codedeploy_app" resources. --- .../aws/resource_aws_codedeploy_app_test.go | 78 +++++++++++++++++++ 1 file changed, 78 insertions(+) create mode 100644 builtin/providers/aws/resource_aws_codedeploy_app_test.go diff --git a/builtin/providers/aws/resource_aws_codedeploy_app_test.go b/builtin/providers/aws/resource_aws_codedeploy_app_test.go new file mode 100644 index 000000000..9c016f184 --- /dev/null +++ b/builtin/providers/aws/resource_aws_codedeploy_app_test.go @@ -0,0 +1,78 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/codedeploy" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSCodeDeployApp_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSCodeDeployAppDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSCodeDeployApp, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSCodeDeployAppExists("aws_codedeploy_app.foo"), + ), + }, + resource.TestStep{ + Config: testAccAWSCodeDeployAppModifier, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSCodeDeployAppExists("aws_codedeploy_app.foo"), + ), + }, + }, + }) +} + +func testAccCheckAWSCodeDeployAppDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).codedeployconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_codedeploy_app" { + continue + } + + resp, err := conn.GetApplication(&codedeploy.GetApplicationInput{ + ApplicationName: aws.String(rs.Primary.ID), + }) + + if err == nil { + if resp.Application != nil { + return fmt.Errorf("CodeDeploy app still exists:\n%#v", 
*resp.Application.ApplicationId) + } + } + + return err + } + + return nil +} + +func testAccCheckAWSCodeDeployAppExists(name string) resource.TestCheckFunc { + return func(s *terraform.State) error { + _, ok := s.RootModule().Resources[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + return nil + } +} + +var testAccAWSCodeDeployApp = ` +resource "aws_codedeploy_app" "foo" { + name = "foo" +}` + +var testAccAWSCodeDeployAppModifier = ` +resource "aws_codedeploy_app" "foo" { + name = "bar" +}` From fa3dfd1420dea811e8b0992092a43afaee703e8e Mon Sep 17 00:00:00 2001 From: Christopher Tiwald Date: Sun, 19 Jul 2015 00:07:43 -0400 Subject: [PATCH 046/100] aws: Add documentation for "aws_codedeploy_app" resources. --- .../aws/r/codedeploy_app.html.markdown | 32 +++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 website/source/docs/providers/aws/r/codedeploy_app.html.markdown diff --git a/website/source/docs/providers/aws/r/codedeploy_app.html.markdown b/website/source/docs/providers/aws/r/codedeploy_app.html.markdown new file mode 100644 index 000000000..054fd1eda --- /dev/null +++ b/website/source/docs/providers/aws/r/codedeploy_app.html.markdown @@ -0,0 +1,32 @@ +--- +layout: "aws" +page_title: "AWS: aws_codedeploy_app" +sidebar_current: "docs-aws-resource-codedeploy-app" +description: |\ + Provides a CodeDeploy application. +--- + +# aws\_codedeploy\_app + +Provides a CodeDeploy application to be used as a basis for deployments + +## Example Usage + +``` +resource "aws_codedeploy_app" "foo" { + name = "foo" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of the application. + +## Attribute Reference + +The following arguments are exported: + +* `id` - Amazon's assigned ID for the application. +* `name` - The application's name. 
From a546a12c2dbbeb6215c4c16ce5e6dbf3a00554dc Mon Sep 17 00:00:00 2001 From: Christopher Tiwald Date: Tue, 20 Oct 2015 18:03:57 -0400 Subject: [PATCH 047/100] aws: Add support for aws_codedeploy_deployment_group resources --- builtin/providers/aws/provider.go | 1 + ...esource_aws_codedeploy_deployment_group.go | 375 ++++++++++++++++++ 2 files changed, 376 insertions(+) create mode 100644 builtin/providers/aws/resource_aws_codedeploy_deployment_group.go diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index 132fa4678..fed004741 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -167,6 +167,7 @@ func Provider() terraform.ResourceProvider { "aws_autoscaling_lifecycle_hook": resourceAwsAutoscalingLifecycleHook(), "aws_cloudwatch_metric_alarm": resourceAwsCloudWatchMetricAlarm(), "aws_codedeploy_app": resourceAwsCodeDeployApp(), + "aws_codedeploy_deployment_group": resourceAwsCodeDeployDeploymentGroup(), "aws_customer_gateway": resourceAwsCustomerGateway(), "aws_db_instance": resourceAwsDbInstance(), "aws_db_parameter_group": resourceAwsDbParameterGroup(), diff --git a/builtin/providers/aws/resource_aws_codedeploy_deployment_group.go b/builtin/providers/aws/resource_aws_codedeploy_deployment_group.go new file mode 100644 index 000000000..a9f3acb07 --- /dev/null +++ b/builtin/providers/aws/resource_aws_codedeploy_deployment_group.go @@ -0,0 +1,375 @@ +package aws + +import ( + "bytes" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/codedeploy" +) + +func resourceAwsCodeDeployDeploymentGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsCodeDeployDeploymentGroupCreate, + Read: resourceAwsCodeDeployDeploymentGroupRead, + Update: 
resourceAwsCodeDeployDeploymentGroupUpdate, + Delete: resourceAwsCodeDeployDeploymentGroupDelete, + + Schema: map[string]*schema.Schema{ + "application_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 100 { + errors = append(errors, fmt.Errorf( + "%q cannot exceed 100 characters", k)) + } + return + }, + }, + + "deployment_group_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 100 { + errors = append(errors, fmt.Errorf( + "%q cannot exceed 100 characters", k)) + } + return + }, + }, + + "service_role_arn": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "autoscaling_groups": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "deployment_config_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "CodeDeployDefault.OneAtATime", + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 100 { + errors = append(errors, fmt.Errorf( + "%q cannot exceed 100 characters", k)) + } + return + }, + }, + + "ec2_tag_filter": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateTagFilters, + }, + + "value": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + Set: resourceAwsCodeDeployTagFilterHash, + }, + + "on_premises_instance_tag_filter": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: 
map[string]*schema.Schema{ + "key": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateTagFilters, + }, + + "value": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + Set: resourceAwsCodeDeployTagFilterHash, + }, + }, + } +} + +func resourceAwsCodeDeployDeploymentGroupCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codedeployconn + + application := d.Get("application_name").(string) + deploymentGroup := d.Get("deployment_group_name").(string) + + input := codedeploy.CreateDeploymentGroupInput{ + ApplicationName: aws.String(application), + DeploymentGroupName: aws.String(deploymentGroup), + ServiceRoleArn: aws.String(d.Get("service_role_arn").(string)), + } + if attr, ok := d.GetOk("deployment_config_name"); ok { + input.DeploymentConfigName = aws.String(attr.(string)) + } + if attr, ok := d.GetOk("autoscaling_groups"); ok { + input.AutoScalingGroups = expandStringList(attr.(*schema.Set).List()) + } + if attr, ok := d.GetOk("on_premises_instance_tag_filters"); ok { + onPremFilters := buildOnPremTagFilters(attr.(*schema.Set).List()) + input.OnPremisesInstanceTagFilters = onPremFilters + } + if attr, ok := d.GetOk("ec2_tag_filter"); ok { + ec2TagFilters := buildEC2TagFilters(attr.(*schema.Set).List()) + input.Ec2TagFilters = ec2TagFilters + } + + // Retry to handle IAM role eventual consistency. 
+ var resp *codedeploy.CreateDeploymentGroupOutput + var err error + err = resource.Retry(2*time.Minute, func() error { + resp, err = conn.CreateDeploymentGroup(&input) + if err != nil { + codedeployErr, ok := err.(awserr.Error) + if !ok { + return &resource.RetryError{Err: err} + } + if codedeployErr.Code() == "InvalidRoleException" { + log.Printf("[DEBUG] Trying to create deployment group again: %q", + codedeployErr.Message()) + return err + } + + return &resource.RetryError{Err: err} + } + return nil + }) + if err != nil { + return err + } + + d.SetId(*resp.DeploymentGroupId) + + return resourceAwsCodeDeployDeploymentGroupRead(d, meta) +} + +func resourceAwsCodeDeployDeploymentGroupRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codedeployconn + + log.Printf("[DEBUG] Reading CodeDeploy DeploymentGroup %s", d.Id()) + resp, err := conn.GetDeploymentGroup(&codedeploy.GetDeploymentGroupInput{ + ApplicationName: aws.String(d.Get("application_name").(string)), + DeploymentGroupName: aws.String(d.Get("deployment_group_name").(string)), + }) + if err != nil { + return err + } + + d.Set("application_name", *resp.DeploymentGroupInfo.ApplicationName) + d.Set("autoscaling_groups", resp.DeploymentGroupInfo.AutoScalingGroups) + d.Set("deployment_config_name", *resp.DeploymentGroupInfo.DeploymentConfigName) + d.Set("deployment_group_name", *resp.DeploymentGroupInfo.DeploymentGroupName) + d.Set("service_role_arn", *resp.DeploymentGroupInfo.ServiceRoleArn) + if err := d.Set("ec2_tag_filter", ec2TagFiltersToMap(resp.DeploymentGroupInfo.Ec2TagFilters)); err != nil { + return err + } + if err := d.Set("on_premises_instance_tag_filter", onPremisesTagFiltersToMap(resp.DeploymentGroupInfo.OnPremisesInstanceTagFilters)); err != nil { + return err + } + + return nil +} + +func resourceAwsCodeDeployDeploymentGroupUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codedeployconn + + input := 
codedeploy.UpdateDeploymentGroupInput{ + ApplicationName: aws.String(d.Get("application_name").(string)), + CurrentDeploymentGroupName: aws.String(d.Get("deployment_group_name").(string)), + } + + if d.HasChange("autoscaling_groups") { + _, n := d.GetChange("autoscaling_groups") + input.AutoScalingGroups = expandStringList(n.(*schema.Set).List()) + } + if d.HasChange("deployment_config_name") { + _, n := d.GetChange("deployment_config_name") + input.DeploymentConfigName = aws.String(n.(string)) + } + if d.HasChange("deployment_group_name") { + _, n := d.GetChange("deployment_group_name") + input.NewDeploymentGroupName = aws.String(n.(string)) + } + + // TagFilters aren't like tags. They don't append. They simply replace. + if d.HasChange("on_premises_instance_tag_filter") { + _, n := d.GetChange("on_premises_instance_tag_filter") + onPremFilters := buildOnPremTagFilters(n.(*schema.Set).List()) + input.OnPremisesInstanceTagFilters = onPremFilters + } + if d.HasChange("ec2_tag_filter") { + _, n := d.GetChange("ec2_tag_filter") + ec2Filters := buildEC2TagFilters(n.(*schema.Set).List()) + input.Ec2TagFilters = ec2Filters + } + + log.Printf("[DEBUG] Updating CodeDeploy DeploymentGroup %s", d.Id()) + _, err := conn.UpdateDeploymentGroup(&input) + if err != nil { + return err + } + + return resourceAwsCodeDeployDeploymentGroupRead(d, meta) +} + +func resourceAwsCodeDeployDeploymentGroupDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codedeployconn + + log.Printf("[DEBUG] Deleting CodeDeploy DeploymentGroup %s", d.Id()) + _, err := conn.DeleteDeploymentGroup(&codedeploy.DeleteDeploymentGroupInput{ + ApplicationName: aws.String(d.Get("application_name").(string)), + DeploymentGroupName: aws.String(d.Get("deployment_group_name").(string)), + }) + if err != nil { + return err + } + + d.SetId("") + + return nil +} + +// buildOnPremTagFilters converts raw schema lists into a list of +// codedeploy.TagFilters. 
+func buildOnPremTagFilters(configured []interface{}) []*codedeploy.TagFilter { + filters := make([]*codedeploy.TagFilter, 0) + for _, raw := range configured { + var filter codedeploy.TagFilter + m := raw.(map[string]interface{}) + + filter.Key = aws.String(m["key"].(string)) + filter.Type = aws.String(m["type"].(string)) + filter.Value = aws.String(m["value"].(string)) + + filters = append(filters, &filter) + } + + return filters +} + +// buildEC2TagFilters converts raw schema lists into a list of +// codedeploy.EC2TagFilters. +func buildEC2TagFilters(configured []interface{}) []*codedeploy.EC2TagFilter { + filters := make([]*codedeploy.EC2TagFilter, 0) + for _, raw := range configured { + var filter codedeploy.EC2TagFilter + m := raw.(map[string]interface{}) + + filter.Key = aws.String(m["key"].(string)) + filter.Type = aws.String(m["type"].(string)) + filter.Value = aws.String(m["value"].(string)) + + filters = append(filters, &filter) + } + + return filters +} + +// ec2TagFiltersToMap converts lists of tag filters into a []map[string]string. +func ec2TagFiltersToMap(list []*codedeploy.EC2TagFilter) []map[string]string { + result := make([]map[string]string, 0, len(list)) + for _, tf := range list { + l := make(map[string]string) + if *tf.Key != "" { + l["key"] = *tf.Key + } + if *tf.Value != "" { + l["value"] = *tf.Value + } + if *tf.Type != "" { + l["type"] = *tf.Type + } + result = append(result, l) + } + return result +} + +// onPremisesTagFiltersToMap converts lists of on-prem tag filters into a []map[string]string. 
+func onPremisesTagFiltersToMap(list []*codedeploy.TagFilter) []map[string]string { + result := make([]map[string]string, 0, len(list)) + for _, tf := range list { + l := make(map[string]string) + if *tf.Key != "" { + l["key"] = *tf.Key + } + if *tf.Value != "" { + l["value"] = *tf.Value + } + if *tf.Type != "" { + l["type"] = *tf.Type + } + result = append(result, l) + } + return result +} + +// validateTagFilters confirms the "value" component of a tag filter is one of +// AWS's three allowed types. +func validateTagFilters(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "KEY_ONLY" && value != "VALUE_ONLY" && value != "KEY_AND_VALUE" { + errors = append(errors, fmt.Errorf( + "%q must be one of \"KEY_ONLY\", \"VALUE_ONLY\", or \"KEY_AND_VALUE\"", k)) + } + return +} + +func resourceAwsCodeDeployTagFilterHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + + // Nothing's actually required in tag filters, so we must check the + // presence of all values before attempting a hash. 
+ if v, ok := m["key"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + if v, ok := m["type"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + if v, ok := m["value"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + return hashcode.String(buf.String()) +} From 390f226eb51d66cf9398a882d605658d9c683d44 Mon Sep 17 00:00:00 2001 From: Christopher Tiwald Date: Tue, 20 Oct 2015 18:04:15 -0400 Subject: [PATCH 048/100] aws: Add aws_codedeploy_deployment_group tests --- ...ce_aws_codedeploy_deployment_group_test.go | 199 ++++++++++++++++++ 1 file changed, 199 insertions(+) create mode 100644 builtin/providers/aws/resource_aws_codedeploy_deployment_group_test.go diff --git a/builtin/providers/aws/resource_aws_codedeploy_deployment_group_test.go b/builtin/providers/aws/resource_aws_codedeploy_deployment_group_test.go new file mode 100644 index 000000000..d883b26b8 --- /dev/null +++ b/builtin/providers/aws/resource_aws_codedeploy_deployment_group_test.go @@ -0,0 +1,199 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/codedeploy" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSCodeDeployDeploymentGroup_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSCodeDeployDeploymentGroupDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSCodeDeployDeploymentGroup, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSCodeDeployDeploymentGroupExists("aws_codedeploy_deployment_group.foo"), + ), + }, + resource.TestStep{ + Config: testAccAWSCodeDeployDeploymentGroupModifier, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSCodeDeployDeploymentGroupExists("aws_codedeploy_deployment_group.foo"), + ), + }, + }, + }) +} + +func 
testAccCheckAWSCodeDeployDeploymentGroupDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).codedeployconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_codedeploy_deployment_group" { + continue + } + + resp, err := conn.GetDeploymentGroup(&codedeploy.GetDeploymentGroupInput{ + ApplicationName: aws.String(rs.Primary.Attributes["application_name"]), + DeploymentGroupName: aws.String(rs.Primary.Attributes["deployment_group_name"]), + }) + + if err == nil { + if resp.DeploymentGroupInfo.DeploymentGroupName != nil { + return fmt.Errorf("CodeDeploy deployment group still exists:\n%#v", *resp.DeploymentGroupInfo.DeploymentGroupName) + } + } + + return err + } + + return nil +} + +func testAccCheckAWSCodeDeployDeploymentGroupExists(name string) resource.TestCheckFunc { + return func(s *terraform.State) error { + _, ok := s.RootModule().Resources[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + return nil + } +} + +var testAccAWSCodeDeployDeploymentGroup = ` +resource "aws_codedeploy_app" "foo_app" { + name = "foo_app" +} + +resource "aws_iam_role_policy" "foo_policy" { + name = "foo_policy" + role = "${aws_iam_role.foo_role.id}" + policy = < Date: Tue, 20 Oct 2015 18:04:39 -0400 Subject: [PATCH 049/100] aws: Add docs for aws_codedeploy_deployment_group --- .../codedeploy_deployment_group.html.markdown | 108 ++++++++++++++++++ 1 file changed, 108 insertions(+) create mode 100644 website/source/docs/providers/aws/r/codedeploy_deployment_group.html.markdown diff --git a/website/source/docs/providers/aws/r/codedeploy_deployment_group.html.markdown b/website/source/docs/providers/aws/r/codedeploy_deployment_group.html.markdown new file mode 100644 index 000000000..cb2417fed --- /dev/null +++ b/website/source/docs/providers/aws/r/codedeploy_deployment_group.html.markdown @@ -0,0 +1,108 @@ +--- +layout: "aws" +page_title: "AWS: aws_codedeploy_deployment_group" +sidebar_current: 
"docs-aws-resource-codedeploy-deployment-group" +description: |\ + Provides a CodeDeploy deployment group. +--- + +# aws\_codedeploy\_deployment\_group + +Provides a CodeDeploy deployment group for an application + +## Example Usage + +``` +resource "aws_codedeploy_app" "foo_app" { + name = "foo_app" +} + +resource "aws_iam_role_policy" "foo_policy" { + name = "foo_policy" + role = "${aws_iam_role.foo_role.id}" + policy = < Date: Wed, 21 Oct 2015 09:49:23 -0500 Subject: [PATCH 050/100] config/lang: restore go1.4.3 generated code my theory is that @mitchellh checked in a go1.5 generated file in 344e7c26b5f116842932d0e6b6ad2f1a250526f4 --- config/lang/y.go | 272 ++++++++++++----------------------------------- 1 file changed, 68 insertions(+), 204 deletions(-) diff --git a/config/lang/y.go b/config/lang/y.go index fd0693f15..e7dd185ae 100644 --- a/config/lang/y.go +++ b/config/lang/y.go @@ -30,10 +30,7 @@ const INTEGER = 57355 const FLOAT = 57356 const STRING = 57357 -var parserToknames = [...]string{ - "$end", - "error", - "$unk", +var parserToknames = []string{ "PROGRAM_BRACKET_LEFT", "PROGRAM_BRACKET_RIGHT", "PROGRAM_STRING_START", @@ -47,7 +44,7 @@ var parserToknames = [...]string{ "FLOAT", "STRING", } -var parserStatenames = [...]string{} +var parserStatenames = []string{} const parserEofCode = 1 const parserErrCode = 2 @@ -56,7 +53,7 @@ const parserMaxDepth = 200 //line lang.y:165 //line yacctab:1 -var parserExca = [...]int{ +var parserExca = []int{ -1, 1, 1, -1, -2, 0, @@ -70,103 +67,75 @@ var parserStates []string const parserLast = 30 -var parserAct = [...]int{ +var parserAct = []int{ 9, 20, 16, 16, 7, 7, 3, 18, 10, 8, 1, 17, 14, 12, 13, 6, 6, 19, 8, 22, 15, 23, 24, 11, 2, 25, 16, 21, 4, 5, } -var parserPact = [...]int{ +var parserPact = []int{ 1, -1000, 1, -1000, -1000, -1000, -1000, 0, -1000, 15, 0, 1, -1000, -1000, -1, -1000, 0, -8, 0, -1000, -1000, 12, -9, -1000, 0, -9, } -var parserPgo = [...]int{ +var parserPgo = []int{ 0, 0, 29, 28, 23, 6, 27, 10, } 
-var parserR1 = [...]int{ +var parserR1 = []int{ 0, 7, 7, 4, 4, 5, 5, 2, 1, 1, 1, 1, 1, 1, 1, 6, 6, 6, 3, } -var parserR2 = [...]int{ +var parserR2 = []int{ 0, 0, 1, 1, 2, 1, 1, 3, 3, 1, 1, 1, 3, 1, 4, 0, 3, 1, 1, } -var parserChk = [...]int{ +var parserChk = []int{ -1000, -7, -4, -5, -3, -2, 15, 4, -5, -1, 8, -4, 13, 14, 12, 5, 11, -1, 8, -1, 9, -6, -1, 9, 10, -1, } -var parserDef = [...]int{ +var parserDef = []int{ 1, -2, 2, 3, 5, 6, 18, 0, 4, 0, 0, 9, 10, 11, 13, 7, 0, 0, 15, 12, 8, 0, 17, 14, 0, 16, } -var parserTok1 = [...]int{ +var parserTok1 = []int{ 1, } -var parserTok2 = [...]int{ +var parserTok2 = []int{ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, } -var parserTok3 = [...]int{ +var parserTok3 = []int{ 0, } -var parserErrorMessages = [...]struct { - state int - token int - msg string -}{} - //line yaccpar:1 /* parser for yacc output */ -var ( - parserDebug = 0 - parserErrorVerbose = false -) +var parserDebug = 0 type parserLexer interface { Lex(lval *parserSymType) int Error(s string) } -type parserParser interface { - Parse(parserLexer) int - Lookahead() int -} - -type parserParserImpl struct { - lookahead func() int -} - -func (p *parserParserImpl) Lookahead() int { - return p.lookahead() -} - -func parserNewParser() parserParser { - p := &parserParserImpl{ - lookahead: func() int { return -1 }, - } - return p -} - const parserFlag = -1000 func parserTokname(c int) string { - if c >= 1 && c-1 < len(parserToknames) { - if parserToknames[c-1] != "" { - return parserToknames[c-1] + // 4 is TOKSTART above + if c >= 4 && c-4 < len(parserToknames) { + if parserToknames[c-4] != "" { + return parserToknames[c-4] } } return __yyfmt__.Sprintf("tok-%v", c) @@ -181,129 +150,51 @@ func parserStatname(s int) string { return __yyfmt__.Sprintf("state-%v", s) } -func parserErrorMessage(state, lookAhead int) string { - const TOKSTART = 4 - - if !parserErrorVerbose { - return "syntax error" - } - - for _, e := range parserErrorMessages { - if e.state == state && 
e.token == lookAhead { - return "syntax error: " + e.msg - } - } - - res := "syntax error: unexpected " + parserTokname(lookAhead) - - // To match Bison, suggest at most four expected tokens. - expected := make([]int, 0, 4) - - // Look for shiftable tokens. - base := parserPact[state] - for tok := TOKSTART; tok-1 < len(parserToknames); tok++ { - if n := base + tok; n >= 0 && n < parserLast && parserChk[parserAct[n]] == tok { - if len(expected) == cap(expected) { - return res - } - expected = append(expected, tok) - } - } - - if parserDef[state] == -2 { - i := 0 - for parserExca[i] != -1 || parserExca[i+1] != state { - i += 2 - } - - // Look for tokens that we accept or reduce. - for i += 2; parserExca[i] >= 0; i += 2 { - tok := parserExca[i] - if tok < TOKSTART || parserExca[i+1] == 0 { - continue - } - if len(expected) == cap(expected) { - return res - } - expected = append(expected, tok) - } - - // If the default action is to accept or reduce, give up. - if parserExca[i+1] != 0 { - return res - } - } - - for i, tok := range expected { - if i == 0 { - res += ", expecting " - } else { - res += " or " - } - res += parserTokname(tok) - } - return res -} - -func parserlex1(lex parserLexer, lval *parserSymType) (char, token int) { - token = 0 - char = lex.Lex(lval) +func parserlex1(lex parserLexer, lval *parserSymType) int { + c := 0 + char := lex.Lex(lval) if char <= 0 { - token = parserTok1[0] + c = parserTok1[0] goto out } if char < len(parserTok1) { - token = parserTok1[char] + c = parserTok1[char] goto out } if char >= parserPrivate { if char < parserPrivate+len(parserTok2) { - token = parserTok2[char-parserPrivate] + c = parserTok2[char-parserPrivate] goto out } } for i := 0; i < len(parserTok3); i += 2 { - token = parserTok3[i+0] - if token == char { - token = parserTok3[i+1] + c = parserTok3[i+0] + if c == char { + c = parserTok3[i+1] goto out } } out: - if token == 0 { - token = parserTok2[1] /* unknown char */ + if c == 0 { + c = parserTok2[1] /* unknown char 
*/ } if parserDebug >= 3 { - __yyfmt__.Printf("lex %s(%d)\n", parserTokname(token), uint(char)) + __yyfmt__.Printf("lex %s(%d)\n", parserTokname(c), uint(char)) } - return char, token + return c } func parserParse(parserlex parserLexer) int { - return parserNewParser().Parse(parserlex) -} - -func (parserrcvr *parserParserImpl) Parse(parserlex parserLexer) int { var parsern int var parserlval parserSymType var parserVAL parserSymType - var parserDollar []parserSymType - _ = parserDollar // silence set and not used parserS := make([]parserSymType, parserMaxDepth) Nerrs := 0 /* number of errors */ Errflag := 0 /* error recovery flag */ parserstate := 0 parserchar := -1 - parsertoken := -1 // parserchar translated into internal numbering - parserrcvr.lookahead = func() int { return parserchar } - defer func() { - // Make sure we report no lookahead when not parsing. - parserstate = -1 - parserchar = -1 - parsertoken = -1 - }() parserp := -1 goto parserstack @@ -316,7 +207,7 @@ ret1: parserstack: /* put a state and value onto the stack */ if parserDebug >= 4 { - __yyfmt__.Printf("char %v in %v\n", parserTokname(parsertoken), parserStatname(parserstate)) + __yyfmt__.Printf("char %v in %v\n", parserTokname(parserchar), parserStatname(parserstate)) } parserp++ @@ -334,16 +225,15 @@ parsernewstate: goto parserdefault /* simple state */ } if parserchar < 0 { - parserchar, parsertoken = parserlex1(parserlex, &parserlval) + parserchar = parserlex1(parserlex, &parserlval) } - parsern += parsertoken + parsern += parserchar if parsern < 0 || parsern >= parserLast { goto parserdefault } parsern = parserAct[parsern] - if parserChk[parsern] == parsertoken { /* valid shift */ + if parserChk[parsern] == parserchar { /* valid shift */ parserchar = -1 - parsertoken = -1 parserVAL = parserlval parserstate = parsern if Errflag > 0 { @@ -357,7 +247,7 @@ parserdefault: parsern = parserDef[parserstate] if parsern == -2 { if parserchar < 0 { - parserchar, parsertoken = parserlex1(parserlex, 
&parserlval) + parserchar = parserlex1(parserlex, &parserlval) } /* look through exception table */ @@ -370,7 +260,7 @@ parserdefault: } for xi += 2; ; xi += 2 { parsern = parserExca[xi+0] - if parsern < 0 || parsern == parsertoken { + if parsern < 0 || parsern == parserchar { break } } @@ -383,11 +273,11 @@ parserdefault: /* error ... attempt to resume parsing */ switch Errflag { case 0: /* brand new error */ - parserlex.Error(parserErrorMessage(parserstate, parsertoken)) + parserlex.Error("syntax error") Nerrs++ if parserDebug >= 1 { __yyfmt__.Printf("%s", parserStatname(parserstate)) - __yyfmt__.Printf(" saw %s\n", parserTokname(parsertoken)) + __yyfmt__.Printf(" saw %s\n", parserTokname(parserchar)) } fallthrough @@ -415,13 +305,12 @@ parserdefault: case 3: /* no shift yet; clobber input char */ if parserDebug >= 2 { - __yyfmt__.Printf("error recovery discards %s\n", parserTokname(parsertoken)) + __yyfmt__.Printf("error recovery discards %s\n", parserTokname(parserchar)) } - if parsertoken == parserEofCode { + if parserchar == parserEofCode { goto ret1 } parserchar = -1 - parsertoken = -1 goto parsernewstate /* try again in the same state */ } } @@ -436,13 +325,6 @@ parserdefault: _ = parserpt // guard against "declared and not used" parserp -= parserR2[parsern] - // parserp is now the index of $0. Perform the default action. Iff the - // reduced production is ε, $1 is possibly out of range. 
- if parserp+1 >= len(parserS) { - nyys := make([]parserSymType, len(parserS)*2) - copy(nyys, parserS) - parserS = nyys - } parserVAL = parserS[parserp+1] /* consult goto table to find next state */ @@ -462,7 +344,6 @@ parserdefault: switch parsernt { case 1: - parserDollar = parserS[parserpt-0 : parserpt+1] //line lang.y:35 { parserResult = &ast.LiteralNode{ @@ -472,10 +353,9 @@ parserdefault: } } case 2: - parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:43 { - parserResult = parserDollar[1].node + parserResult = parserS[parserpt-0].node // We want to make sure that the top value is always a Concat // so that the return value is always a string type from an @@ -485,30 +365,28 @@ parserdefault: // because functionally the AST is the same, but we do that because // it makes for an easy literal check later (to check if a string // has any interpolations). - if _, ok := parserDollar[1].node.(*ast.Concat); !ok { - if n, ok := parserDollar[1].node.(*ast.LiteralNode); !ok || n.Typex != ast.TypeString { + if _, ok := parserS[parserpt-0].node.(*ast.Concat); !ok { + if n, ok := parserS[parserpt-0].node.(*ast.LiteralNode); !ok || n.Typex != ast.TypeString { parserResult = &ast.Concat{ - Exprs: []ast.Node{parserDollar[1].node}, - Posx: parserDollar[1].node.Pos(), + Exprs: []ast.Node{parserS[parserpt-0].node}, + Posx: parserS[parserpt-0].node.Pos(), } } } } case 3: - parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:66 { - parserVAL.node = parserDollar[1].node + parserVAL.node = parserS[parserpt-0].node } case 4: - parserDollar = parserS[parserpt-2 : parserpt+1] //line lang.y:70 { var result []ast.Node - if c, ok := parserDollar[1].node.(*ast.Concat); ok { - result = append(c.Exprs, parserDollar[2].node) + if c, ok := parserS[parserpt-1].node.(*ast.Concat); ok { + result = append(c.Exprs, parserS[parserpt-0].node) } else { - result = []ast.Node{parserDollar[1].node, parserDollar[2].node} + result = []ast.Node{parserS[parserpt-1].node, 
parserS[parserpt-0].node} } parserVAL.node = &ast.Concat{ @@ -517,103 +395,89 @@ parserdefault: } } case 5: - parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:86 { - parserVAL.node = parserDollar[1].node + parserVAL.node = parserS[parserpt-0].node } case 6: - parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:90 { - parserVAL.node = parserDollar[1].node + parserVAL.node = parserS[parserpt-0].node } case 7: - parserDollar = parserS[parserpt-3 : parserpt+1] //line lang.y:96 { - parserVAL.node = parserDollar[2].node + parserVAL.node = parserS[parserpt-1].node } case 8: - parserDollar = parserS[parserpt-3 : parserpt+1] //line lang.y:102 { - parserVAL.node = parserDollar[2].node + parserVAL.node = parserS[parserpt-1].node } case 9: - parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:106 { - parserVAL.node = parserDollar[1].node + parserVAL.node = parserS[parserpt-0].node } case 10: - parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:110 { parserVAL.node = &ast.LiteralNode{ - Value: parserDollar[1].token.Value.(int), + Value: parserS[parserpt-0].token.Value.(int), Typex: ast.TypeInt, - Posx: parserDollar[1].token.Pos, + Posx: parserS[parserpt-0].token.Pos, } } case 11: - parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:118 { parserVAL.node = &ast.LiteralNode{ - Value: parserDollar[1].token.Value.(float64), + Value: parserS[parserpt-0].token.Value.(float64), Typex: ast.TypeFloat, - Posx: parserDollar[1].token.Pos, + Posx: parserS[parserpt-0].token.Pos, } } case 12: - parserDollar = parserS[parserpt-3 : parserpt+1] //line lang.y:126 { parserVAL.node = &ast.Arithmetic{ - Op: parserDollar[2].token.Value.(ast.ArithmeticOp), - Exprs: []ast.Node{parserDollar[1].node, parserDollar[3].node}, - Posx: parserDollar[1].node.Pos(), + Op: parserS[parserpt-1].token.Value.(ast.ArithmeticOp), + Exprs: []ast.Node{parserS[parserpt-2].node, parserS[parserpt-0].node}, + Posx: parserS[parserpt-2].node.Pos(), } } case 13: - 
parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:134 { - parserVAL.node = &ast.VariableAccess{Name: parserDollar[1].token.Value.(string), Posx: parserDollar[1].token.Pos} + parserVAL.node = &ast.VariableAccess{Name: parserS[parserpt-0].token.Value.(string), Posx: parserS[parserpt-0].token.Pos} } case 14: - parserDollar = parserS[parserpt-4 : parserpt+1] //line lang.y:138 { - parserVAL.node = &ast.Call{Func: parserDollar[1].token.Value.(string), Args: parserDollar[3].nodeList, Posx: parserDollar[1].token.Pos} + parserVAL.node = &ast.Call{Func: parserS[parserpt-3].token.Value.(string), Args: parserS[parserpt-1].nodeList, Posx: parserS[parserpt-3].token.Pos} } case 15: - parserDollar = parserS[parserpt-0 : parserpt+1] //line lang.y:143 { parserVAL.nodeList = nil } case 16: - parserDollar = parserS[parserpt-3 : parserpt+1] //line lang.y:147 { - parserVAL.nodeList = append(parserDollar[1].nodeList, parserDollar[3].node) + parserVAL.nodeList = append(parserS[parserpt-2].nodeList, parserS[parserpt-0].node) } case 17: - parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:151 { - parserVAL.nodeList = append(parserVAL.nodeList, parserDollar[1].node) + parserVAL.nodeList = append(parserVAL.nodeList, parserS[parserpt-0].node) } case 18: - parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:157 { parserVAL.node = &ast.LiteralNode{ - Value: parserDollar[1].token.Value.(string), + Value: parserS[parserpt-0].token.Value.(string), Typex: ast.TypeString, - Posx: parserDollar[1].token.Pos, + Posx: parserS[parserpt-0].token.Pos, } } } From cccc5d03e393acdddacbdb8dc27cdeed602047a9 Mon Sep 17 00:00:00 2001 From: Matt Morrison Date: Tue, 20 Oct 2015 14:49:51 +1300 Subject: [PATCH 051/100] Add lower / upper interpolation functions --- config/interpolate_funcs.go | 28 +++++++++++ config/interpolate_funcs_test.go | 48 +++++++++++++++++++ .../docs/configuration/interpolation.html.md | 4 ++ 3 files changed, 80 insertions(+) diff --git 
a/config/interpolate_funcs.go b/config/interpolate_funcs.go index 5322e46c4..1b58ac93c 100644 --- a/config/interpolate_funcs.go +++ b/config/interpolate_funcs.go @@ -29,10 +29,12 @@ func init() { "index": interpolationFuncIndex(), "join": interpolationFuncJoin(), "length": interpolationFuncLength(), + "lower": interpolationFuncLower(), "replace": interpolationFuncReplace(), "split": interpolationFuncSplit(), "base64encode": interpolationFuncBase64Encode(), "base64decode": interpolationFuncBase64Decode(), + "upper": interpolationFuncUpper(), } } @@ -442,3 +444,29 @@ func interpolationFuncBase64Decode() ast.Function { }, } } + +// interpolationFuncLower implements the "lower" function that does +// string lower casing. +func interpolationFuncLower() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + toLower := args[0].(string) + return strings.ToLower(toLower), nil + }, + } +} + +// interpolationFuncUpper implements the "upper" function that does +// string upper casing. 
+func interpolationFuncUpper() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + toUpper := args[0].(string) + return strings.ToUpper(toUpper), nil + }, + } +} diff --git a/config/interpolate_funcs_test.go b/config/interpolate_funcs_test.go index cafdf0564..f40f56860 100644 --- a/config/interpolate_funcs_test.go +++ b/config/interpolate_funcs_test.go @@ -644,6 +644,54 @@ func TestInterpolateFuncBase64Decode(t *testing.T) { }) } +func TestInterpolateFuncLower(t *testing.T) { + testFunction(t, testFunctionConfig{ + Cases: []testFunctionCase{ + { + `${lower("HELLO")}`, + "hello", + false, + }, + + { + `${lower("")}`, + "", + false, + }, + + { + `${lower()}`, + nil, + true, + }, + }, + }) +} + +func TestInterpolateFuncUpper(t *testing.T) { + testFunction(t, testFunctionConfig{ + Cases: []testFunctionCase{ + { + `${upper("hello")}`, + "HELLO", + false, + }, + + { + `${upper("")}`, + "", + false, + }, + + { + `${upper()}`, + nil, + true, + }, + }, + }) +} + type testFunctionConfig struct { Cases []testFunctionCase Vars map[string]ast.Variable diff --git a/website/source/docs/configuration/interpolation.html.md b/website/source/docs/configuration/interpolation.html.md index 28d03790d..940839076 100644 --- a/website/source/docs/configuration/interpolation.html.md +++ b/website/source/docs/configuration/interpolation.html.md @@ -131,6 +131,8 @@ The supported built-in functions are: variable. The `map` parameter should be another variable, such as `var.amis`. + * `lower(string)` - returns a copy of the string with all Unicode letters mapped to their lower case. + * `replace(string, search, replace)` - Does a search and replace on the given string. All instances of `search` are replaced with the value of `replace`. 
If `search` is wrapped in forward slashes, it is treated @@ -147,6 +149,8 @@ The supported built-in functions are: `a_resource_param = ["${split(",", var.CSV_STRING)}"]`. Example: `split(",", module.amod.server_ids)` + * `upper(string)` - returns a copy of the string with all Unicode letters mapped to their upper case. + ## Templates Long strings can be managed using templates. [Templates](/docs/providers/template/index.html) are [resources](/docs/configuration/resources.html) defined by a filename and some variables to use during interpolation. They have a computed `rendered` attribute containing the result. From 938b7e2dba9d27b7f0fab7faa201231a06d9e42a Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Wed, 21 Oct 2015 08:19:51 -0700 Subject: [PATCH 052/100] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 007dedfcd..48ceb0cd7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ FEATURES: * New remote state backend: `etcd` [GH-3487] + * New interpolation functions: `upper` and `lower` [GH-3558] INTERNAL IMPROVEMENTS: From dd56b39e0ce8a0567eb6f154a5e73f681bfa615e Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Wed, 21 Oct 2015 08:34:46 -0700 Subject: [PATCH 053/100] Codeploy deployment group app_name instead of application_name. The corresponding resource is called aws_codeploy_app, so for consistency we'll name the attribute app_name instead of application_name. 
--- .../aws/resource_aws_codedeploy_deployment_group.go | 12 ++++++------ .../resource_aws_codedeploy_deployment_group_test.go | 6 +++--- .../aws/r/codedeploy_deployment_group.html.markdown | 6 +++--- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/builtin/providers/aws/resource_aws_codedeploy_deployment_group.go b/builtin/providers/aws/resource_aws_codedeploy_deployment_group.go index a9f3acb07..ee81f1cf3 100644 --- a/builtin/providers/aws/resource_aws_codedeploy_deployment_group.go +++ b/builtin/providers/aws/resource_aws_codedeploy_deployment_group.go @@ -23,7 +23,7 @@ func resourceAwsCodeDeployDeploymentGroup() *schema.Resource { Delete: resourceAwsCodeDeployDeploymentGroupDelete, Schema: map[string]*schema.Schema{ - "application_name": &schema.Schema{ + "app_name": &schema.Schema{ Type: schema.TypeString, Required: true, ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { @@ -132,7 +132,7 @@ func resourceAwsCodeDeployDeploymentGroup() *schema.Resource { func resourceAwsCodeDeployDeploymentGroupCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).codedeployconn - application := d.Get("application_name").(string) + application := d.Get("app_name").(string) deploymentGroup := d.Get("deployment_group_name").(string) input := codedeploy.CreateDeploymentGroupInput{ @@ -189,14 +189,14 @@ func resourceAwsCodeDeployDeploymentGroupRead(d *schema.ResourceData, meta inter log.Printf("[DEBUG] Reading CodeDeploy DeploymentGroup %s", d.Id()) resp, err := conn.GetDeploymentGroup(&codedeploy.GetDeploymentGroupInput{ - ApplicationName: aws.String(d.Get("application_name").(string)), + ApplicationName: aws.String(d.Get("app_name").(string)), DeploymentGroupName: aws.String(d.Get("deployment_group_name").(string)), }) if err != nil { return err } - d.Set("application_name", *resp.DeploymentGroupInfo.ApplicationName) + d.Set("app_name", *resp.DeploymentGroupInfo.ApplicationName) d.Set("autoscaling_groups", 
resp.DeploymentGroupInfo.AutoScalingGroups) d.Set("deployment_config_name", *resp.DeploymentGroupInfo.DeploymentConfigName) d.Set("deployment_group_name", *resp.DeploymentGroupInfo.DeploymentGroupName) @@ -215,7 +215,7 @@ func resourceAwsCodeDeployDeploymentGroupUpdate(d *schema.ResourceData, meta int conn := meta.(*AWSClient).codedeployconn input := codedeploy.UpdateDeploymentGroupInput{ - ApplicationName: aws.String(d.Get("application_name").(string)), + ApplicationName: aws.String(d.Get("app_name").(string)), CurrentDeploymentGroupName: aws.String(d.Get("deployment_group_name").(string)), } @@ -258,7 +258,7 @@ func resourceAwsCodeDeployDeploymentGroupDelete(d *schema.ResourceData, meta int log.Printf("[DEBUG] Deleting CodeDeploy DeploymentGroup %s", d.Id()) _, err := conn.DeleteDeploymentGroup(&codedeploy.DeleteDeploymentGroupInput{ - ApplicationName: aws.String(d.Get("application_name").(string)), + ApplicationName: aws.String(d.Get("app_name").(string)), DeploymentGroupName: aws.String(d.Get("deployment_group_name").(string)), }) if err != nil { diff --git a/builtin/providers/aws/resource_aws_codedeploy_deployment_group_test.go b/builtin/providers/aws/resource_aws_codedeploy_deployment_group_test.go index d883b26b8..7608b1f58 100644 --- a/builtin/providers/aws/resource_aws_codedeploy_deployment_group_test.go +++ b/builtin/providers/aws/resource_aws_codedeploy_deployment_group_test.go @@ -41,7 +41,7 @@ func testAccCheckAWSCodeDeployDeploymentGroupDestroy(s *terraform.State) error { } resp, err := conn.GetDeploymentGroup(&codedeploy.GetDeploymentGroupInput{ - ApplicationName: aws.String(rs.Primary.Attributes["application_name"]), + ApplicationName: aws.String(rs.Primary.Attributes["app_name"]), DeploymentGroupName: aws.String(rs.Primary.Attributes["deployment_group_name"]), }) @@ -123,7 +123,7 @@ EOF } resource "aws_codedeploy_deployment_group" "foo" { - application_name = "${aws_codedeploy_app.foo_app.name}" + app_name = "${aws_codedeploy_app.foo_app.name}" 
deployment_group_name = "foo" service_role_arn = "${aws_iam_role.foo_role.arn}" ec2_tag_filter { @@ -188,7 +188,7 @@ EOF } resource "aws_codedeploy_deployment_group" "foo" { - application_name = "${aws_codedeploy_app.foo_app.name}" + app_name = "${aws_codedeploy_app.foo_app.name}" deployment_group_name = "bar" service_role_arn = "${aws_iam_role.foo_role.arn}" ec2_tag_filter { diff --git a/website/source/docs/providers/aws/r/codedeploy_deployment_group.html.markdown b/website/source/docs/providers/aws/r/codedeploy_deployment_group.html.markdown index cb2417fed..ae0c3b645 100644 --- a/website/source/docs/providers/aws/r/codedeploy_deployment_group.html.markdown +++ b/website/source/docs/providers/aws/r/codedeploy_deployment_group.html.markdown @@ -67,7 +67,7 @@ EOF } resource "aws_codedeploy_deployment_group" "foo" { - application_name = "${aws_codedeploy_app.foo_app.name}" + app_name = "${aws_codedeploy_app.foo_app.name}" deployment_group_name = "bar" service_role_arn = "${aws_iam_role.foo_role.arn}" ec2_tag_filter { @@ -82,7 +82,7 @@ resource "aws_codedeploy_deployment_group" "foo" { The following arguments are supported: -* `application_name` - (Required) The name of the application. +* `app_name` - (Required) The name of the application. * `deployment_group_name` - (Required) The name of the deployment group. * `service_role_arn` - (Required) The service role ARN that allows deployments. * `autoscaling_groups` - (Optional) Autoscaling groups associated with the deployment group. @@ -101,7 +101,7 @@ Both ec2_tag_filter and on_premises_tag_filter blocks support the following: The following attributes are exported: * `id` - The deployment group's ID. -* `application_name` - The group's assigned application. +* `app_name` - The group's assigned application. * `deployment_group_name` - The group's name. * `service_role_arn` - The group's service role ARN. * `autoscaling_groups` - The autoscaling groups associated with the deployment group. 
From 305db7341b7d710e35c6b4273ea292fc43435a13 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Wed, 21 Oct 2015 10:37:18 -0500 Subject: [PATCH 054/100] Update CHANGELOG.md --- CHANGELOG.md | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 48ceb0cd7..a68b33517 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,9 +5,16 @@ FEATURES: * New remote state backend: `etcd` [GH-3487] * New interpolation functions: `upper` and `lower` [GH-3558] +BUG FIXES: + + * core: Fix remote state conflicts caused by ambiguity in ordering of deeply nested modules [GH-3573] + * core: Fix remote state conflicts caused by state metadata differences [GH-3569] + * core: Avoid using http.DefaultClient [GH-3532] + INTERNAL IMPROVEMENTS: - * provider/digitalocean: use official Go client [GH-3333] + * provider/digitalocean: use official Go client [GH-3333] + * core: extract module fetching to external library [GH-3516] ## 0.6.4 (October 15, 2015) From 8a60219c0455de66a30372734f961e30542f1f81 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Wed, 21 Oct 2015 08:40:26 -0700 Subject: [PATCH 055/100] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a68b33517..ea969533a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ FEATURES: + * **New resources: `aws_codeploy_app` and `aws_codeploy_deployment_group`** [GH-2783] * New remote state backend: `etcd` [GH-3487] * New interpolation functions: `upper` and `lower` [GH-3558] From f790309634ba59f87712635cdf751b6d653762b3 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Wed, 21 Oct 2015 08:44:19 -0700 Subject: [PATCH 056/100] CodeDeploy links to the AWS provider sidebar. Missed these when merging #2783. 
--- website/source/layouts/aws.erb | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb index 4b34da23a..f6efd2377 100644 --- a/website/source/layouts/aws.erb +++ b/website/source/layouts/aws.erb @@ -26,6 +26,21 @@ + > + CodeDeploy Resources + + + > Directory Service Resources From ed951639847c310357778e9316b6ae1c51af54aa Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Tue, 27 Oct 2015 16:17:19 -0500 Subject: [PATCH 088/100] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f36e70dbe..885774c90 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ IMPROVEMENTS: * provider/google: Accurate Terraform Version [GH-3554] * provider/google: Simplified auth (DefaultClient support) [GH-3553] * provider/google: automatic_restart, preemptible, on_host_maintenance options [GH-3643] + * null_resource: enhance and document [GH-3244, GH-3659] BUG FIXES: From 122790d32bd23fbe0c91e9bb1dd0bce19f1ec8b5 Mon Sep 17 00:00:00 2001 From: Kazunori Kojima Date: Wed, 28 Oct 2015 09:19:37 +0900 Subject: [PATCH 089/100] Add check errors on reading CORS rules --- builtin/providers/aws/resource_aws_s3_bucket.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/builtin/providers/aws/resource_aws_s3_bucket.go b/builtin/providers/aws/resource_aws_s3_bucket.go index 93105ec51..3c284370f 100644 --- a/builtin/providers/aws/resource_aws_s3_bucket.go +++ b/builtin/providers/aws/resource_aws_s3_bucket.go @@ -276,7 +276,9 @@ func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error { rule["max_age_seconds"] = ruleObject.MaxAgeSeconds rules = append(rules, rule) } - d.Set("cors_rule", rules) + if err := d.Set("cors_rule", rules); err != nil { + return fmt.Errorf("error reading S3 bucket \"%s\" CORS rules: %s", d.Id(), err) + } } // Read the website configuration From 89fb16ada0ca5d9f8fc368c46799881d753ede9b Mon Sep 
17 00:00:00 2001 From: Clint Date: Wed, 28 Oct 2015 10:10:06 -0500 Subject: [PATCH 090/100] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 885774c90..47a446aa9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ IMPROVEMENTS: * provider/google: Simplified auth (DefaultClient support) [GH-3553] * provider/google: automatic_restart, preemptible, on_host_maintenance options [GH-3643] * null_resource: enhance and document [GH-3244, GH-3659] + * provider/aws: Add CORS settings to S3 bucket [GH-3387] BUG FIXES: From 784aadd5056ac05b53cec1583b5f2e4beeb4106a Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 28 Oct 2015 14:54:53 +0000 Subject: [PATCH 091/100] Allow cluster name, not only ARN for aws_ecs_service --- .../providers/aws/resource_aws_ecs_service.go | 18 +++++-- .../aws/resource_aws_ecs_service_test.go | 48 +++++++++++++++++++ 2 files changed, 61 insertions(+), 5 deletions(-) diff --git a/builtin/providers/aws/resource_aws_ecs_service.go b/builtin/providers/aws/resource_aws_ecs_service.go index 9d3a36ab2..ab8562acb 100644 --- a/builtin/providers/aws/resource_aws_ecs_service.go +++ b/builtin/providers/aws/resource_aws_ecs_service.go @@ -137,7 +137,6 @@ func resourceAwsEcsServiceCreate(d *schema.ResourceData, meta interface{}) error log.Printf("[DEBUG] ECS service created: %s", *service.ServiceArn) d.SetId(*service.ServiceArn) - d.Set("cluster", *service.ClusterArn) return resourceAwsEcsServiceUpdate(d, meta) } @@ -175,14 +174,21 @@ func resourceAwsEcsServiceRead(d *schema.ResourceData, meta interface{}) error { } d.Set("desired_count", *service.DesiredCount) - d.Set("cluster", *service.ClusterArn) + + // Save cluster in the same format + if strings.HasPrefix(d.Get("cluster").(string), "arn:aws:ecs:") { + d.Set("cluster", *service.ClusterArn) + } else { + clusterARN := getNameFromARN(*service.ClusterArn) + d.Set("cluster", clusterARN) + } // Save IAM role in the same format if 
service.RoleArn != nil { if strings.HasPrefix(d.Get("iam_role").(string), "arn:aws:iam:") { d.Set("iam_role", *service.RoleArn) } else { - roleARN := buildIamRoleNameFromARN(*service.RoleArn) + roleARN := getNameFromARN(*service.RoleArn) d.Set("iam_role", roleARN) } } @@ -306,8 +312,10 @@ func buildFamilyAndRevisionFromARN(arn string) string { return strings.Split(arn, "/")[1] } -func buildIamRoleNameFromARN(arn string) string { - // arn:aws:iam::0123456789:role/EcsService +// Expects the following ARNs: +// arn:aws:iam::0123456789:role/EcsService +// arn:aws:ecs:us-west-2:0123456789:cluster/radek-cluster +func getNameFromARN(arn string) string { return strings.Split(arn, "/")[1] } diff --git a/builtin/providers/aws/resource_aws_ecs_service_test.go b/builtin/providers/aws/resource_aws_ecs_service_test.go index 2f9b8fedb..7f88f1536 100644 --- a/builtin/providers/aws/resource_aws_ecs_service_test.go +++ b/builtin/providers/aws/resource_aws_ecs_service_test.go @@ -178,6 +178,26 @@ func TestAccAWSEcsService_withIamRole(t *testing.T) { }) } +// Regression for https://github.com/hashicorp/terraform/issues/3361 +func TestAccAWSEcsService_withEcsClusterName(t *testing.T) { + clusterName := regexp.MustCompile("^terraformecstestcluster$") + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSEcsServiceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSEcsServiceWithEcsClusterName, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSEcsServiceExists("aws_ecs_service.jenkins"), + resource.TestMatchResourceAttr( + "aws_ecs_service.jenkins", "cluster", clusterName), + ), + }, + }, + }) +} + func testAccCheckAWSEcsServiceDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).ecsconn @@ -471,3 +491,31 @@ resource "aws_ecs_service" "ghost" { desired_count = 1 } ` + +var testAccAWSEcsServiceWithEcsClusterName = ` +resource 
"aws_ecs_cluster" "default" { + name = "terraformecstestcluster" +} + +resource "aws_ecs_task_definition" "jenkins" { + family = "jenkins" + container_definitions = < Date: Wed, 28 Oct 2015 16:17:20 +0000 Subject: [PATCH 092/100] Update CHANGELOG.md --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 47a446aa9..bf27fb1c3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ ## 0.6.7 (Unreleased) +FEATURES: + + * **New resources: `aws_cloudformation_stack`** [GH-2636] + IMPROVEMENTS: * provider/google: Accurate Terraform Version [GH-3554] From af04321723200d4cb4158e7e9a6b1f8b9f6aaf1b Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Wed, 28 Oct 2015 12:17:14 -0400 Subject: [PATCH 093/100] config: return to the go1.5 generated lang/y.go It has improvements to error messaging that we want. We'll use this occasion begin developing / building with Go 1.5 from here on out. Build times will be slower, but we have core development plans that will help mitigate that. 
/cc @hashicorp/terraform-committers --- config/lang/y.go | 272 +++++++++++++++++++++++++++++++++++------------ 1 file changed, 204 insertions(+), 68 deletions(-) diff --git a/config/lang/y.go b/config/lang/y.go index e7dd185ae..fd0693f15 100644 --- a/config/lang/y.go +++ b/config/lang/y.go @@ -30,7 +30,10 @@ const INTEGER = 57355 const FLOAT = 57356 const STRING = 57357 -var parserToknames = []string{ +var parserToknames = [...]string{ + "$end", + "error", + "$unk", "PROGRAM_BRACKET_LEFT", "PROGRAM_BRACKET_RIGHT", "PROGRAM_STRING_START", @@ -44,7 +47,7 @@ var parserToknames = []string{ "FLOAT", "STRING", } -var parserStatenames = []string{} +var parserStatenames = [...]string{} const parserEofCode = 1 const parserErrCode = 2 @@ -53,7 +56,7 @@ const parserMaxDepth = 200 //line lang.y:165 //line yacctab:1 -var parserExca = []int{ +var parserExca = [...]int{ -1, 1, 1, -1, -2, 0, @@ -67,75 +70,103 @@ var parserStates []string const parserLast = 30 -var parserAct = []int{ +var parserAct = [...]int{ 9, 20, 16, 16, 7, 7, 3, 18, 10, 8, 1, 17, 14, 12, 13, 6, 6, 19, 8, 22, 15, 23, 24, 11, 2, 25, 16, 21, 4, 5, } -var parserPact = []int{ +var parserPact = [...]int{ 1, -1000, 1, -1000, -1000, -1000, -1000, 0, -1000, 15, 0, 1, -1000, -1000, -1, -1000, 0, -8, 0, -1000, -1000, 12, -9, -1000, 0, -9, } -var parserPgo = []int{ +var parserPgo = [...]int{ 0, 0, 29, 28, 23, 6, 27, 10, } -var parserR1 = []int{ +var parserR1 = [...]int{ 0, 7, 7, 4, 4, 5, 5, 2, 1, 1, 1, 1, 1, 1, 1, 6, 6, 6, 3, } -var parserR2 = []int{ +var parserR2 = [...]int{ 0, 0, 1, 1, 2, 1, 1, 3, 3, 1, 1, 1, 3, 1, 4, 0, 3, 1, 1, } -var parserChk = []int{ +var parserChk = [...]int{ -1000, -7, -4, -5, -3, -2, 15, 4, -5, -1, 8, -4, 13, 14, 12, 5, 11, -1, 8, -1, 9, -6, -1, 9, 10, -1, } -var parserDef = []int{ +var parserDef = [...]int{ 1, -2, 2, 3, 5, 6, 18, 0, 4, 0, 0, 9, 10, 11, 13, 7, 0, 0, 15, 12, 8, 0, 17, 14, 0, 16, } -var parserTok1 = []int{ +var parserTok1 = [...]int{ 1, } -var parserTok2 = []int{ +var parserTok2 = 
[...]int{ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, } -var parserTok3 = []int{ +var parserTok3 = [...]int{ 0, } +var parserErrorMessages = [...]struct { + state int + token int + msg string +}{} + //line yaccpar:1 /* parser for yacc output */ -var parserDebug = 0 +var ( + parserDebug = 0 + parserErrorVerbose = false +) type parserLexer interface { Lex(lval *parserSymType) int Error(s string) } +type parserParser interface { + Parse(parserLexer) int + Lookahead() int +} + +type parserParserImpl struct { + lookahead func() int +} + +func (p *parserParserImpl) Lookahead() int { + return p.lookahead() +} + +func parserNewParser() parserParser { + p := &parserParserImpl{ + lookahead: func() int { return -1 }, + } + return p +} + const parserFlag = -1000 func parserTokname(c int) string { - // 4 is TOKSTART above - if c >= 4 && c-4 < len(parserToknames) { - if parserToknames[c-4] != "" { - return parserToknames[c-4] + if c >= 1 && c-1 < len(parserToknames) { + if parserToknames[c-1] != "" { + return parserToknames[c-1] } } return __yyfmt__.Sprintf("tok-%v", c) @@ -150,51 +181,129 @@ func parserStatname(s int) string { return __yyfmt__.Sprintf("state-%v", s) } -func parserlex1(lex parserLexer, lval *parserSymType) int { - c := 0 - char := lex.Lex(lval) +func parserErrorMessage(state, lookAhead int) string { + const TOKSTART = 4 + + if !parserErrorVerbose { + return "syntax error" + } + + for _, e := range parserErrorMessages { + if e.state == state && e.token == lookAhead { + return "syntax error: " + e.msg + } + } + + res := "syntax error: unexpected " + parserTokname(lookAhead) + + // To match Bison, suggest at most four expected tokens. + expected := make([]int, 0, 4) + + // Look for shiftable tokens. 
+ base := parserPact[state] + for tok := TOKSTART; tok-1 < len(parserToknames); tok++ { + if n := base + tok; n >= 0 && n < parserLast && parserChk[parserAct[n]] == tok { + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + } + + if parserDef[state] == -2 { + i := 0 + for parserExca[i] != -1 || parserExca[i+1] != state { + i += 2 + } + + // Look for tokens that we accept or reduce. + for i += 2; parserExca[i] >= 0; i += 2 { + tok := parserExca[i] + if tok < TOKSTART || parserExca[i+1] == 0 { + continue + } + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + + // If the default action is to accept or reduce, give up. + if parserExca[i+1] != 0 { + return res + } + } + + for i, tok := range expected { + if i == 0 { + res += ", expecting " + } else { + res += " or " + } + res += parserTokname(tok) + } + return res +} + +func parserlex1(lex parserLexer, lval *parserSymType) (char, token int) { + token = 0 + char = lex.Lex(lval) if char <= 0 { - c = parserTok1[0] + token = parserTok1[0] goto out } if char < len(parserTok1) { - c = parserTok1[char] + token = parserTok1[char] goto out } if char >= parserPrivate { if char < parserPrivate+len(parserTok2) { - c = parserTok2[char-parserPrivate] + token = parserTok2[char-parserPrivate] goto out } } for i := 0; i < len(parserTok3); i += 2 { - c = parserTok3[i+0] - if c == char { - c = parserTok3[i+1] + token = parserTok3[i+0] + if token == char { + token = parserTok3[i+1] goto out } } out: - if c == 0 { - c = parserTok2[1] /* unknown char */ + if token == 0 { + token = parserTok2[1] /* unknown char */ } if parserDebug >= 3 { - __yyfmt__.Printf("lex %s(%d)\n", parserTokname(c), uint(char)) + __yyfmt__.Printf("lex %s(%d)\n", parserTokname(token), uint(char)) } - return c + return char, token } func parserParse(parserlex parserLexer) int { + return parserNewParser().Parse(parserlex) +} + +func (parserrcvr *parserParserImpl) Parse(parserlex 
parserLexer) int { var parsern int var parserlval parserSymType var parserVAL parserSymType + var parserDollar []parserSymType + _ = parserDollar // silence set and not used parserS := make([]parserSymType, parserMaxDepth) Nerrs := 0 /* number of errors */ Errflag := 0 /* error recovery flag */ parserstate := 0 parserchar := -1 + parsertoken := -1 // parserchar translated into internal numbering + parserrcvr.lookahead = func() int { return parserchar } + defer func() { + // Make sure we report no lookahead when not parsing. + parserstate = -1 + parserchar = -1 + parsertoken = -1 + }() parserp := -1 goto parserstack @@ -207,7 +316,7 @@ ret1: parserstack: /* put a state and value onto the stack */ if parserDebug >= 4 { - __yyfmt__.Printf("char %v in %v\n", parserTokname(parserchar), parserStatname(parserstate)) + __yyfmt__.Printf("char %v in %v\n", parserTokname(parsertoken), parserStatname(parserstate)) } parserp++ @@ -225,15 +334,16 @@ parsernewstate: goto parserdefault /* simple state */ } if parserchar < 0 { - parserchar = parserlex1(parserlex, &parserlval) + parserchar, parsertoken = parserlex1(parserlex, &parserlval) } - parsern += parserchar + parsern += parsertoken if parsern < 0 || parsern >= parserLast { goto parserdefault } parsern = parserAct[parsern] - if parserChk[parsern] == parserchar { /* valid shift */ + if parserChk[parsern] == parsertoken { /* valid shift */ parserchar = -1 + parsertoken = -1 parserVAL = parserlval parserstate = parsern if Errflag > 0 { @@ -247,7 +357,7 @@ parserdefault: parsern = parserDef[parserstate] if parsern == -2 { if parserchar < 0 { - parserchar = parserlex1(parserlex, &parserlval) + parserchar, parsertoken = parserlex1(parserlex, &parserlval) } /* look through exception table */ @@ -260,7 +370,7 @@ parserdefault: } for xi += 2; ; xi += 2 { parsern = parserExca[xi+0] - if parsern < 0 || parsern == parserchar { + if parsern < 0 || parsern == parsertoken { break } } @@ -273,11 +383,11 @@ parserdefault: /* error ... 
attempt to resume parsing */ switch Errflag { case 0: /* brand new error */ - parserlex.Error("syntax error") + parserlex.Error(parserErrorMessage(parserstate, parsertoken)) Nerrs++ if parserDebug >= 1 { __yyfmt__.Printf("%s", parserStatname(parserstate)) - __yyfmt__.Printf(" saw %s\n", parserTokname(parserchar)) + __yyfmt__.Printf(" saw %s\n", parserTokname(parsertoken)) } fallthrough @@ -305,12 +415,13 @@ parserdefault: case 3: /* no shift yet; clobber input char */ if parserDebug >= 2 { - __yyfmt__.Printf("error recovery discards %s\n", parserTokname(parserchar)) + __yyfmt__.Printf("error recovery discards %s\n", parserTokname(parsertoken)) } - if parserchar == parserEofCode { + if parsertoken == parserEofCode { goto ret1 } parserchar = -1 + parsertoken = -1 goto parsernewstate /* try again in the same state */ } } @@ -325,6 +436,13 @@ parserdefault: _ = parserpt // guard against "declared and not used" parserp -= parserR2[parsern] + // parserp is now the index of $0. Perform the default action. Iff the + // reduced production is ε, $1 is possibly out of range. + if parserp+1 >= len(parserS) { + nyys := make([]parserSymType, len(parserS)*2) + copy(nyys, parserS) + parserS = nyys + } parserVAL = parserS[parserp+1] /* consult goto table to find next state */ @@ -344,6 +462,7 @@ parserdefault: switch parsernt { case 1: + parserDollar = parserS[parserpt-0 : parserpt+1] //line lang.y:35 { parserResult = &ast.LiteralNode{ @@ -353,9 +472,10 @@ parserdefault: } } case 2: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:43 { - parserResult = parserS[parserpt-0].node + parserResult = parserDollar[1].node // We want to make sure that the top value is always a Concat // so that the return value is always a string type from an @@ -365,28 +485,30 @@ parserdefault: // because functionally the AST is the same, but we do that because // it makes for an easy literal check later (to check if a string // has any interpolations). 
- if _, ok := parserS[parserpt-0].node.(*ast.Concat); !ok { - if n, ok := parserS[parserpt-0].node.(*ast.LiteralNode); !ok || n.Typex != ast.TypeString { + if _, ok := parserDollar[1].node.(*ast.Concat); !ok { + if n, ok := parserDollar[1].node.(*ast.LiteralNode); !ok || n.Typex != ast.TypeString { parserResult = &ast.Concat{ - Exprs: []ast.Node{parserS[parserpt-0].node}, - Posx: parserS[parserpt-0].node.Pos(), + Exprs: []ast.Node{parserDollar[1].node}, + Posx: parserDollar[1].node.Pos(), } } } } case 3: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:66 { - parserVAL.node = parserS[parserpt-0].node + parserVAL.node = parserDollar[1].node } case 4: + parserDollar = parserS[parserpt-2 : parserpt+1] //line lang.y:70 { var result []ast.Node - if c, ok := parserS[parserpt-1].node.(*ast.Concat); ok { - result = append(c.Exprs, parserS[parserpt-0].node) + if c, ok := parserDollar[1].node.(*ast.Concat); ok { + result = append(c.Exprs, parserDollar[2].node) } else { - result = []ast.Node{parserS[parserpt-1].node, parserS[parserpt-0].node} + result = []ast.Node{parserDollar[1].node, parserDollar[2].node} } parserVAL.node = &ast.Concat{ @@ -395,89 +517,103 @@ parserdefault: } } case 5: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:86 { - parserVAL.node = parserS[parserpt-0].node + parserVAL.node = parserDollar[1].node } case 6: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:90 { - parserVAL.node = parserS[parserpt-0].node + parserVAL.node = parserDollar[1].node } case 7: + parserDollar = parserS[parserpt-3 : parserpt+1] //line lang.y:96 { - parserVAL.node = parserS[parserpt-1].node + parserVAL.node = parserDollar[2].node } case 8: + parserDollar = parserS[parserpt-3 : parserpt+1] //line lang.y:102 { - parserVAL.node = parserS[parserpt-1].node + parserVAL.node = parserDollar[2].node } case 9: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:106 { - parserVAL.node = parserS[parserpt-0].node + parserVAL.node = 
parserDollar[1].node } case 10: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:110 { parserVAL.node = &ast.LiteralNode{ - Value: parserS[parserpt-0].token.Value.(int), + Value: parserDollar[1].token.Value.(int), Typex: ast.TypeInt, - Posx: parserS[parserpt-0].token.Pos, + Posx: parserDollar[1].token.Pos, } } case 11: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:118 { parserVAL.node = &ast.LiteralNode{ - Value: parserS[parserpt-0].token.Value.(float64), + Value: parserDollar[1].token.Value.(float64), Typex: ast.TypeFloat, - Posx: parserS[parserpt-0].token.Pos, + Posx: parserDollar[1].token.Pos, } } case 12: + parserDollar = parserS[parserpt-3 : parserpt+1] //line lang.y:126 { parserVAL.node = &ast.Arithmetic{ - Op: parserS[parserpt-1].token.Value.(ast.ArithmeticOp), - Exprs: []ast.Node{parserS[parserpt-2].node, parserS[parserpt-0].node}, - Posx: parserS[parserpt-2].node.Pos(), + Op: parserDollar[2].token.Value.(ast.ArithmeticOp), + Exprs: []ast.Node{parserDollar[1].node, parserDollar[3].node}, + Posx: parserDollar[1].node.Pos(), } } case 13: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:134 { - parserVAL.node = &ast.VariableAccess{Name: parserS[parserpt-0].token.Value.(string), Posx: parserS[parserpt-0].token.Pos} + parserVAL.node = &ast.VariableAccess{Name: parserDollar[1].token.Value.(string), Posx: parserDollar[1].token.Pos} } case 14: + parserDollar = parserS[parserpt-4 : parserpt+1] //line lang.y:138 { - parserVAL.node = &ast.Call{Func: parserS[parserpt-3].token.Value.(string), Args: parserS[parserpt-1].nodeList, Posx: parserS[parserpt-3].token.Pos} + parserVAL.node = &ast.Call{Func: parserDollar[1].token.Value.(string), Args: parserDollar[3].nodeList, Posx: parserDollar[1].token.Pos} } case 15: + parserDollar = parserS[parserpt-0 : parserpt+1] //line lang.y:143 { parserVAL.nodeList = nil } case 16: + parserDollar = parserS[parserpt-3 : parserpt+1] //line lang.y:147 { - parserVAL.nodeList = 
append(parserS[parserpt-2].nodeList, parserS[parserpt-0].node) + parserVAL.nodeList = append(parserDollar[1].nodeList, parserDollar[3].node) } case 17: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:151 { - parserVAL.nodeList = append(parserVAL.nodeList, parserS[parserpt-0].node) + parserVAL.nodeList = append(parserVAL.nodeList, parserDollar[1].node) } case 18: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:157 { parserVAL.node = &ast.LiteralNode{ - Value: parserS[parserpt-0].token.Value.(string), + Value: parserDollar[1].token.Value.(string), Typex: ast.TypeString, - Posx: parserS[parserpt-0].token.Pos, + Posx: parserDollar[1].token.Pos, } } } From 22ec52396adef10449fa55c81f3aada69fa008cb Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 28 Oct 2015 16:30:03 +0000 Subject: [PATCH 094/100] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index bf27fb1c3..075cb0f44 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ BUG FIXES: * provider/google: Timeout when deleting large instance_group_manager [GH-3591] * provider/aws: Fix issue with order of Termincation Policies in AutoScaling Groups. 
This will introduce plans on upgrade to this version, in order to correct the ordering [GH-2890] + * provider/aws: Allow cluster name, not only ARN for `aws_ecs_service` [GH-3668] ## 0.6.6 (October 23, 2015) From a618b048cf232000327db590df7cb2b4a0049b19 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Thu, 27 Aug 2015 23:11:56 +0100 Subject: [PATCH 095/100] aws: Add support for aws_cloudtrail --- builtin/providers/aws/config.go | 5 + builtin/providers/aws/provider.go | 1 + .../providers/aws/resource_aws_cloudtrail.go | 167 ++++++++++++++++++ 3 files changed, 173 insertions(+) create mode 100644 builtin/providers/aws/resource_aws_cloudtrail.go diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go index b8fc9fa47..298d24ccd 100644 --- a/builtin/providers/aws/config.go +++ b/builtin/providers/aws/config.go @@ -13,6 +13,7 @@ import ( "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/service/autoscaling" "github.com/aws/aws-sdk-go/service/cloudformation" + "github.com/aws/aws-sdk-go/service/cloudtrail" "github.com/aws/aws-sdk-go/service/cloudwatch" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "github.com/aws/aws-sdk-go/service/codedeploy" @@ -51,6 +52,7 @@ type Config struct { type AWSClient struct { cfconn *cloudformation.CloudFormation + cloudtrailconn *cloudtrail.CloudTrail cloudwatchconn *cloudwatch.CloudWatch cloudwatchlogsconn *cloudwatchlogs.CloudWatchLogs dsconn *directoryservice.DirectoryService @@ -188,6 +190,9 @@ func (c *Config) Client() (interface{}, error) { log.Println("[INFO] Initializing CloudWatch SDK connection") client.cloudwatchconn = cloudwatch.New(awsConfig) + log.Println("[INFO] Initializing CloudTrail connection") + client.cloudtrailconn = cloudtrail.New(awsConfig) + log.Println("[INFO] Initializing CloudWatch Logs connection") client.cloudwatchlogsconn = cloudwatchlogs.New(awsConfig) diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index 5b02d4a70..d4567e238 
100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -164,6 +164,7 @@ func Provider() terraform.ResourceProvider { "aws_autoscaling_notification": resourceAwsAutoscalingNotification(), "aws_autoscaling_policy": resourceAwsAutoscalingPolicy(), "aws_cloudformation_stack": resourceAwsCloudFormationStack(), + "aws_cloudtrail": resourceAwsCloudTrail(), "aws_cloudwatch_log_group": resourceAwsCloudWatchLogGroup(), "aws_autoscaling_lifecycle_hook": resourceAwsAutoscalingLifecycleHook(), "aws_cloudwatch_metric_alarm": resourceAwsCloudWatchMetricAlarm(), diff --git a/builtin/providers/aws/resource_aws_cloudtrail.go b/builtin/providers/aws/resource_aws_cloudtrail.go new file mode 100644 index 000000000..eed420edf --- /dev/null +++ b/builtin/providers/aws/resource_aws_cloudtrail.go @@ -0,0 +1,167 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cloudtrail" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsCloudTrail() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsCloudTrailCreate, + Read: resourceAwsCloudTrailRead, + Update: resourceAwsCloudTrailUpdate, + Delete: resourceAwsCloudTrailDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "s3_bucket_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "s3_key_prefix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "cloud_watch_logs_role_arn": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "cloud_watch_logs_group_arn": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "include_global_service_events": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "sns_topic_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + } +} + +func resourceAwsCloudTrailCreate(d 
*schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudtrailconn + + input := cloudtrail.CreateTrailInput{ + Name: aws.String(d.Get("name").(string)), + S3BucketName: aws.String(d.Get("s3_bucket_name").(string)), + } + + if v, ok := d.GetOk("cloud_watch_logs_group_arn"); ok { + input.CloudWatchLogsLogGroupArn = aws.String(v.(string)) + } + if v, ok := d.GetOk("cloud_watch_logs_role_arn"); ok { + input.CloudWatchLogsRoleArn = aws.String(v.(string)) + } + if v, ok := d.GetOk("include_global_service_events"); ok { + input.IncludeGlobalServiceEvents = aws.Bool(v.(bool)) + } + if v, ok := d.GetOk("s3_key_prefix"); ok { + input.S3KeyPrefix = aws.String(v.(string)) + } + if v, ok := d.GetOk("sns_topic_name"); ok { + input.SnsTopicName = aws.String(v.(string)) + } + + t, err := conn.CreateTrail(&input) + if err != nil { + return err + } + + log.Printf("[DEBUG] CloudTrail created: %s", t) + + d.SetId(*t.Name) + + return resourceAwsCloudTrailRead(d, meta) +} + +func resourceAwsCloudTrailRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudtrailconn + + name := d.Get("name").(string) + input := cloudtrail.DescribeTrailsInput{ + TrailNameList: []*string{ + aws.String(name), + }, + } + resp, err := conn.DescribeTrails(&input) + if err != nil { + return err + } + if len(resp.TrailList) == 0 { + return fmt.Errorf("No CloudTrail found, using name %q", name) + } + + trail := resp.TrailList[0] + log.Printf("[DEBUG] CloudTrail received: %s", trail) + + d.Set("name", trail.Name) + d.Set("s3_bucket_name", trail.S3BucketName) + d.Set("s3_key_prefix", trail.S3KeyPrefix) + d.Set("cloud_watch_logs_role_arn", trail.CloudWatchLogsRoleArn) + d.Set("cloud_watch_logs_group_arn", trail.CloudWatchLogsLogGroupArn) + d.Set("include_global_service_events", trail.IncludeGlobalServiceEvents) + d.Set("sns_topic_name", trail.SnsTopicName) + + return nil +} + +func resourceAwsCloudTrailUpdate(d *schema.ResourceData, meta interface{}) error { + 
conn := meta.(*AWSClient).cloudtrailconn + + input := cloudtrail.UpdateTrailInput{ + Name: aws.String(d.Get("name").(string)), + } + + if d.HasChange("s3_bucket_name") { + input.S3BucketName = aws.String(d.Get("s3_bucket_name").(string)) + } + if d.HasChange("s3_key_prefix") { + input.S3KeyPrefix = aws.String(d.Get("s3_key_prefix").(string)) + } + if d.HasChange("cloud_watch_logs_role_arn") { + input.CloudWatchLogsRoleArn = aws.String(d.Get("cloud_watch_logs_role_arn").(string)) + } + if d.HasChange("cloud_watch_logs_group_arn") { + input.CloudWatchLogsLogGroupArn = aws.String(d.Get("cloud_watch_logs_group_arn").(string)) + } + if d.HasChange("include_global_service_events") { + input.IncludeGlobalServiceEvents = aws.Bool(d.Get("include_global_service_events").(bool)) + } + if d.HasChange("sns_topic_name") { + input.SnsTopicName = aws.String(d.Get("sns_topic_name").(string)) + } + + log.Printf("[DEBUG] Updating CloudTrail: %s", input) + t, err := conn.UpdateTrail(&input) + if err != nil { + return err + } + log.Printf("[DEBUG] CloudTrail updated: %s", t) + + return resourceAwsCloudTrailRead(d, meta) +} + +func resourceAwsCloudTrailDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudtrailconn + name := d.Get("name").(string) + + log.Printf("[DEBUG] Deleting CloudTrail: %q", name) + _, err := conn.DeleteTrail(&cloudtrail.DeleteTrailInput{ + Name: aws.String(name), + }) + + return err +} From 7265bdaaf0cefc261bbc144acf1c09968c48943a Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Thu, 27 Aug 2015 23:12:12 +0100 Subject: [PATCH 096/100] aws: Add acceptance test for aws_cloudtrail --- .../aws/resource_aws_cloudtrail_test.go | 169 ++++++++++++++++++ 1 file changed, 169 insertions(+) create mode 100644 builtin/providers/aws/resource_aws_cloudtrail_test.go diff --git a/builtin/providers/aws/resource_aws_cloudtrail_test.go b/builtin/providers/aws/resource_aws_cloudtrail_test.go new file mode 100644 index 000000000..10ed17a5b --- 
/dev/null +++ b/builtin/providers/aws/resource_aws_cloudtrail_test.go @@ -0,0 +1,169 @@ +package aws + +import ( + "fmt" + "math/rand" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cloudtrail" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSCloudTrail_basic(t *testing.T) { + var trail cloudtrail.Trail + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSCloudTrailDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSCloudTrailConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudTrailExists("aws_cloudtrail.foobar", &trail), + resource.TestCheckResourceAttr("aws_cloudtrail.foobar", "include_global_service_events", "true"), + ), + }, + resource.TestStep{ + Config: testAccAWSCloudTrailConfigModified, + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudTrailExists("aws_cloudtrail.foobar", &trail), + resource.TestCheckResourceAttr("aws_cloudtrail.foobar", "s3_key_prefix", "/prefix"), + resource.TestCheckResourceAttr("aws_cloudtrail.foobar", "include_global_service_events", "false"), + ), + }, + }, + }) +} + +func testAccCheckCloudTrailExists(n string, trail *cloudtrail.Trail) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := testAccProvider.Meta().(*AWSClient).cloudtrailconn + params := cloudtrail.DescribeTrailsInput{ + TrailNameList: []*string{aws.String(rs.Primary.ID)}, + } + resp, err := conn.DescribeTrails(¶ms) + if err != nil { + return err + } + if len(resp.TrailList) == 0 { + return fmt.Errorf("Trail not found") + } + *trail = *resp.TrailList[0] + + return nil + } +} + +func testAccCheckAWSCloudTrailDestroy(s *terraform.State) error { + conn := 
testAccProvider.Meta().(*AWSClient).cloudtrailconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_cloudtrail" { + continue + } + + params := cloudtrail.DescribeTrailsInput{ + TrailNameList: []*string{aws.String(rs.Primary.ID)}, + } + + resp, err := conn.DescribeTrails(¶ms) + + if err == nil { + if len(resp.TrailList) != 0 && + *resp.TrailList[0].Name == rs.Primary.ID { + return fmt.Errorf("CloudTrail still exists: %s", rs.Primary.ID) + } + } + } + + return nil +} + +var cloudTrailRandInt = rand.New(rand.NewSource(time.Now().UnixNano())).Int() + +var testAccAWSCloudTrailConfig = fmt.Sprintf(` +resource "aws_cloudtrail" "foobar" { + name = "tf-trail-foobar" + s3_bucket_name = "${aws_s3_bucket.foo.id}" +} + +resource "aws_s3_bucket" "foo" { + bucket = "tf-test-trail-%d" + force_destroy = true + policy = < Date: Thu, 27 Aug 2015 23:25:29 +0100 Subject: [PATCH 097/100] aws: Add docs for aws_cloudtrail --- .../providers/aws/r/cloudtrail.html.markdown | 75 +++++++++++++++++++ website/source/layouts/aws.erb | 9 +++ 2 files changed, 84 insertions(+) create mode 100644 website/source/docs/providers/aws/r/cloudtrail.html.markdown diff --git a/website/source/docs/providers/aws/r/cloudtrail.html.markdown b/website/source/docs/providers/aws/r/cloudtrail.html.markdown new file mode 100644 index 000000000..d4ba604fc --- /dev/null +++ b/website/source/docs/providers/aws/r/cloudtrail.html.markdown @@ -0,0 +1,75 @@ +--- +layout: "aws" +page_title: "AWS: cloudtrail" +sidebar_current: "docs-aws-resource-cloudtrail" +description: |- + Provides a CloudTrail resource. +--- + +# aws\_cloudtrail + +Provides a CloudTrail resource. 
+ +## Example Usage +``` +resource "aws_cloudtrail" "foobar" { + name = "tf-trail-foobar" + s3_bucket_name = "${aws_s3_bucket.foo.id}" + s3_key_prefix = "/prefix" + include_global_service_events = false +} + +resource "aws_s3_bucket" "foo" { + bucket = "tf-test-trail" + force_destroy = true + policy = < + > + CloudTrail Resources + + + > CloudWatch Resources