From 87cde8834ec176e5b1a86bec65853a345bd56890 Mon Sep 17 00:00:00 2001 From: Jean Mertz Date: Sun, 3 May 2015 16:00:00 +0200 Subject: [PATCH 001/220] OpenStack: add functionality to attach FloatingIP to Port --- ...urce_openstack_networking_floatingip_v2.go | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/builtin/providers/openstack/resource_openstack_networking_floatingip_v2.go b/builtin/providers/openstack/resource_openstack_networking_floatingip_v2.go index 1b81c6a96..37f1ca7cf 100644 --- a/builtin/providers/openstack/resource_openstack_networking_floatingip_v2.go +++ b/builtin/providers/openstack/resource_openstack_networking_floatingip_v2.go @@ -14,6 +14,7 @@ func resourceNetworkingFloatingIPV2() *schema.Resource { return &schema.Resource{ Create: resourceNetworkFloatingIPV2Create, Read: resourceNetworkFloatingIPV2Read, + Update: resourceNetworkFloatingIPV2Update, Delete: resourceNetworkFloatingIPV2Delete, Schema: map[string]*schema.Schema{ @@ -33,6 +34,11 @@ func resourceNetworkingFloatingIPV2() *schema.Resource { ForceNew: true, DefaultFunc: envDefaultFunc("OS_POOL_NAME"), }, + "port_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "", + }, }, } } @@ -53,6 +59,7 @@ func resourceNetworkFloatingIPV2Create(d *schema.ResourceData, meta interface{}) } createOpts := floatingips.CreateOpts{ FloatingNetworkID: poolID, + PortID: d.Get("port_id").(string), } log.Printf("[DEBUG] Create Options: %#v", createOpts) floatingIP, err := floatingips.Create(networkClient, createOpts).Extract() @@ -78,6 +85,7 @@ func resourceNetworkFloatingIPV2Read(d *schema.ResourceData, meta interface{}) e } d.Set("address", floatingIP.FloatingIP) + d.Set("port_id", floatingIP.PortID) poolName, err := getNetworkName(d, meta, floatingIP.FloatingNetworkID) if err != nil { return fmt.Errorf("Error retrieving floating IP pool name: %s", err) @@ -87,6 +95,29 @@ func resourceNetworkFloatingIPV2Read(d *schema.ResourceData, meta interface{}) e return nil } +func resourceNetworkFloatingIPV2Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack network client: %s", err) + } + + var updateOpts floatingips.UpdateOpts + + if d.HasChange("port_id") { + updateOpts.PortID = d.Get("port_id").(string) + } + + log.Printf("[DEBUG] Update Options: %#v", updateOpts) + + _, err = floatingips.Update(networkClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating floating IP: %s", err) + } + + return resourceNetworkFloatingIPV2Read(d, meta) +} + func resourceNetworkFloatingIPV2Delete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) networkClient, err := config.networkingV2Client(d.Get("region").(string)) From 07ad320960e3fd61e80f00b919dc1a5217d0d321 Mon Sep 17 00:00:00 2001 From: Aaron Welch Date: Sun, 31 May 2015 15:58:14 +0100 Subject: [PATCH 002/220] Packet bare metal cloud hosting platform provider --- builtin/bins/provider-packet/main.go | 12 + builtin/providers/packet/config.go | 18 ++ builtin/providers/packet/provider.go | 36 +++ builtin/providers/packet/provider_test.go | 35 ++ .../packet/resource_packet_device.go | 302 ++++++++++++++++++ .../packet/resource_packet_project.go | 123 +++++++ .../packet/resource_packet_project_test.go | 95 ++++++ .../packet/resource_packet_ssh_key.go | 128 ++++++++ .../packet/resource_packet_ssh_key_test.go | 104 ++++++ 
.../docs/providers/packet/index.html.markdown | 47 +++ .../providers/packet/r/device.html.markdown | 55 ++++ .../providers/packet/r/project.html.markdown | 40 +++ .../providers/packet/r/ssh_key.html.markdown | 43 +++ 13 files changed, 1038 insertions(+) create mode 100644 builtin/bins/provider-packet/main.go create mode 100644 builtin/providers/packet/config.go create mode 100644 builtin/providers/packet/provider.go create mode 100644 builtin/providers/packet/provider_test.go create mode 100644 builtin/providers/packet/resource_packet_device.go create mode 100644 builtin/providers/packet/resource_packet_project.go create mode 100644 builtin/providers/packet/resource_packet_project_test.go create mode 100644 builtin/providers/packet/resource_packet_ssh_key.go create mode 100644 builtin/providers/packet/resource_packet_ssh_key_test.go create mode 100644 website/source/docs/providers/packet/index.html.markdown create mode 100644 website/source/docs/providers/packet/r/device.html.markdown create mode 100644 website/source/docs/providers/packet/r/project.html.markdown create mode 100644 website/source/docs/providers/packet/r/ssh_key.html.markdown diff --git a/builtin/bins/provider-packet/main.go b/builtin/bins/provider-packet/main.go new file mode 100644 index 000000000..6d8198ef2 --- /dev/null +++ b/builtin/bins/provider-packet/main.go @@ -0,0 +1,12 @@ +package main + +import ( + "github.com/hashicorp/terraform/builtin/providers/packet" + "github.com/hashicorp/terraform/plugin" +) + +func main() { + plugin.Serve(&plugin.ServeOpts{ + ProviderFunc: packet.Provider, + }) +} diff --git a/builtin/providers/packet/config.go b/builtin/providers/packet/config.go new file mode 100644 index 000000000..659ee9ebc --- /dev/null +++ b/builtin/providers/packet/config.go @@ -0,0 +1,18 @@ +package packet + +import ( + "github.com/packethost/packngo" +) + +const ( + consumerToken = "aZ9GmqHTPtxevvFq9SK3Pi2yr9YCbRzduCSXF2SNem5sjB91mDq7Th3ZwTtRqMWZ" +) + +type Config struct { + AuthToken string +} + +// Client() returns a new client for accessing packet. +func (c *Config) Client() *packngo.Client { + return packngo.NewClient(consumerToken, c.AuthToken) +} diff --git a/builtin/providers/packet/provider.go b/builtin/providers/packet/provider.go new file mode 100644 index 000000000..c1efd6e83 --- /dev/null +++ b/builtin/providers/packet/provider.go @@ -0,0 +1,36 @@ +package packet + +import ( + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +// Provider returns a schema.Provider for Packet. 
+func Provider() terraform.ResourceProvider { + return &schema.Provider{ + Schema: map[string]*schema.Schema{ + "auth_token": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("PACKET_AUTH_TOKEN", nil), + Description: "The API auth key for API operations.", + }, + }, + + ResourcesMap: map[string]*schema.Resource{ + "packet_device": resourcePacketDevice(), + "packet_ssh_key": resourcePacketSSHKey(), + "packet_project": resourcePacketProject(), + }, + + ConfigureFunc: providerConfigure, + } +} + +func providerConfigure(d *schema.ResourceData) (interface{}, error) { + config := Config{ + AuthToken: d.Get("auth_token").(string), + } + + return config.Client(), nil +} diff --git a/builtin/providers/packet/provider_test.go b/builtin/providers/packet/provider_test.go new file mode 100644 index 000000000..5483c4fb0 --- /dev/null +++ b/builtin/providers/packet/provider_test.go @@ -0,0 +1,35 @@ +package packet + +import ( + "os" + "testing" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +var testAccProviders map[string]terraform.ResourceProvider +var testAccProvider *schema.Provider + +func init() { + testAccProvider = Provider().(*schema.Provider) + testAccProviders = map[string]terraform.ResourceProvider{ + "packet": testAccProvider, + } +} + +func TestProvider(t *testing.T) { + if err := Provider().(*schema.Provider).InternalValidate(); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestProvider_impl(t *testing.T) { + var _ terraform.ResourceProvider = Provider() +} + +func testAccPreCheck(t *testing.T) { + if v := os.Getenv("PACKET_AUTH_TOKEN"); v == "" { + t.Fatal("PACKET_AUTH_TOKEN must be set for acceptance tests") + } +} diff --git a/builtin/providers/packet/resource_packet_device.go b/builtin/providers/packet/resource_packet_device.go new file mode 100644 index 000000000..56fc7afe5 --- /dev/null +++ b/builtin/providers/packet/resource_packet_device.go @@ -0,0 +1,302 @@ +package packet + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/packethost/packngo" +) + +func resourcePacketDevice() *schema.Resource { + return &schema.Resource{ + Create: resourcePacketDeviceCreate, + Read: resourcePacketDeviceRead, + Update: resourcePacketDeviceUpdate, + Delete: resourcePacketDeviceDelete, + + Schema: map[string]*schema.Schema{ + "project_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "hostname": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "operating_system": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "facility": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "plan": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "billing_cycle": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "state": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "locked": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + }, + + "network": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "gateway": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "family": 
&schema.Schema{
+						Type:     schema.TypeInt,
+						Computed: true,
+					},
+
+					"cidr": &schema.Schema{
+						Type:     schema.TypeInt,
+						Computed: true,
+					},
+
+					"public": &schema.Schema{
+						Type:     schema.TypeBool,
+						Computed: true,
+					},
+				},
+			},
+		},
+
+		"created": &schema.Schema{
+			Type:     schema.TypeString,
+			Computed: true,
+		},
+
+		"updated": &schema.Schema{
+			Type:     schema.TypeString,
+			Computed: true,
+		},
+
+		"user_data": &schema.Schema{
+			Type:     schema.TypeString,
+			Optional: true,
+		},
+
+		"tags": &schema.Schema{
+			Type:     schema.TypeList,
+			Optional: true,
+			Elem:     &schema.Schema{Type: schema.TypeString},
+		},
+	},
+	}
+}
+
+func resourcePacketDeviceCreate(d *schema.ResourceData, meta interface{}) error {
+	client := meta.(*packngo.Client)
+
+	createRequest := &packngo.DeviceCreateRequest{
+		HostName:     d.Get("hostname").(string),
+		Plan:         d.Get("plan").(string),
+		Facility:     d.Get("facility").(string),
+		OS:           d.Get("operating_system").(string),
+		BillingCycle: d.Get("billing_cycle").(string),
+		ProjectID:    d.Get("project_id").(string),
+	}
+
+	if attr, ok := d.GetOk("user_data"); ok {
+		createRequest.UserData = attr.(string)
+	}
+
+	tags := d.Get("tags.#").(int)
+	if tags > 0 {
+		createRequest.Tags = make([]string, 0, tags)
+		for i := 0; i < tags; i++ {
+			key := fmt.Sprintf("tags.%d", i)
+			createRequest.Tags = append(createRequest.Tags, d.Get(key).(string))
+		}
+	}
+
+	log.Printf("[DEBUG] Device create configuration: %#v", createRequest)
+
+	newDevice, _, err := client.Devices.Create(createRequest)
+	if err != nil {
+		return fmt.Errorf("Error creating device: %s", err)
+	}
+
+	// Assign the device id
+	d.SetId(newDevice.ID)
+
+	log.Printf("[INFO] Device ID: %s", d.Id())
+
+	_, err = WaitForDeviceAttribute(d, "active", []string{"provisioning"}, "state", meta)
+	if err != nil {
+		return fmt.Errorf(
+			"Error waiting for device (%s) to become ready: %s", d.Id(), err)
+	}
+
+	return resourcePacketDeviceRead(d, meta)
+}
+
+func resourcePacketDeviceRead(d *schema.ResourceData, meta interface{}) error {
+	client := meta.(*packngo.Client)
+
+	// Retrieve the device properties for updating the state
+	device, _, err := client.Devices.Get(d.Id())
+	if err != nil {
+		return fmt.Errorf("Error retrieving device: %s", err)
+	}
+
+	d.Set("hostname", device.Hostname)
+	d.Set("plan", device.Plan.Slug)
+	d.Set("facility", device.Facility.Code)
+	d.Set("operating_system", device.OS.Slug)
+	d.Set("state", device.State)
+	d.Set("billing_cycle", device.BillingCycle)
+	d.Set("locked", device.Locked)
+	d.Set("created", device.Created)
+	d.Set("updated", device.Updated)
+
+	tags := make([]string, 0)
+	for _, tag := range device.Tags {
+		tags = append(tags, tag)
+	}
+	d.Set("tags", tags)
+
+	networks := make([]map[string]interface{}, 0, 1)
+	for _, ip := range device.Network {
+		network := make(map[string]interface{})
+		network["address"] = ip.Address
+		network["gateway"] = ip.Gateway
+		network["family"] = ip.Family
+		network["cidr"] = ip.Cidr
+		network["public"] = ip.Public
+		networks = append(networks, network)
+	}
+	d.Set("network", networks)
+
+	return nil
+}
+
+func resourcePacketDeviceUpdate(d *schema.ResourceData, meta interface{}) error {
+	client := meta.(*packngo.Client)
+
+	if d.HasChange("locked") && d.Get("locked").(bool) {
+		_, err := client.Devices.Lock(d.Id())
+
+		if err != nil {
+			return fmt.Errorf(
+				"Error locking device (%s): %s", d.Id(), err)
+		}
+	} else if d.HasChange("locked") {
+		_, err := client.Devices.Unlock(d.Id())
+
+		if err != nil {
+			return fmt.Errorf(
+				"Error unlocking device (%s): %s", d.Id(), err)
+		}
+	}
+ + return resourcePacketDeviceRead(d, meta) +} + +func resourcePacketDeviceDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + log.Printf("[INFO] Deleting device: %s", d.Id()) + if _, err := client.Devices.Delete(d.Id()); err != nil { + return fmt.Errorf("Error deleting device: %s", err) + } + + return nil +} + +func WaitForDeviceAttribute( + d *schema.ResourceData, target string, pending []string, attribute string, meta interface{}) (interface{}, error) { + // Wait for the device so we can get the networking attributes + // that show up after a while + log.Printf( + "[INFO] Waiting for device (%s) to have %s of %s", + d.Id(), attribute, target) + + stateConf := &resource.StateChangeConf{ + Pending: pending, + Target: target, + Refresh: newDeviceStateRefreshFunc(d, attribute, meta), + Timeout: 60 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + return stateConf.WaitForState() +} + +func newDeviceStateRefreshFunc( + d *schema.ResourceData, attribute string, meta interface{}) resource.StateRefreshFunc { + client := meta.(*packngo.Client) + return func() (interface{}, string, error) { + err := resourcePacketDeviceRead(d, meta) + if err != nil { + return nil, "", err + } + + // See if we can access our attribute + if attr, ok := d.GetOk(attribute); ok { + // Retrieve the device properties + device, _, err := client.Devices.Get(d.Id()) + if err != nil { + return nil, "", fmt.Errorf("Error retrieving device: %s", err) + } + + return &device, attr.(string), nil + } + + return nil, "", nil + } +} + +// Powers on the device and waits for it to be active +func powerOnAndWait(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + _, err := client.Devices.PowerOn(d.Id()) + if err != nil { + return err + } + + // Wait for power on + _, err = WaitForDeviceAttribute(d, "active", []string{"off"}, "state", client) + if err != nil { + return err + } + + return nil +} diff --git a/builtin/providers/packet/resource_packet_project.go b/builtin/providers/packet/resource_packet_project.go new file mode 100644 index 000000000..e41ef1381 --- /dev/null +++ b/builtin/providers/packet/resource_packet_project.go @@ -0,0 +1,123 @@ +package packet + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/packethost/packngo" +) + +func resourcePacketProject() *schema.Resource { + return &schema.Resource{ + Create: resourcePacketProjectCreate, + Read: resourcePacketProjectRead, + Update: resourcePacketProjectUpdate, + Delete: resourcePacketProjectDelete, + + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "payment_method": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "created": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "updated": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourcePacketProjectCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + createRequest := &packngo.ProjectCreateRequest{ + Name: d.Get("name").(string), + PaymentMethod: d.Get("payment_method").(string), + } + + log.Printf("[DEBUG] Project create configuration: %#v", createRequest) + project, _, err := client.Projects.Create(createRequest) + if err != nil { + return fmt.Errorf("Error creating Project: %s", err) + } + + 
d.SetId(project.ID)
+	log.Printf("[INFO] Project created: %s", project.ID)
+
+	return resourcePacketProjectRead(d, meta)
+}
+
+func resourcePacketProjectRead(d *schema.ResourceData, meta interface{}) error {
+	client := meta.(*packngo.Client)
+
+	project, _, err := client.Projects.Get(d.Id())
+	if err != nil {
+		// If the project was somehow already destroyed, mark it as
+		// successfully gone
+		if strings.Contains(err.Error(), "404") {
+			d.SetId("")
+			return nil
+		}
+
+		return fmt.Errorf("Error retrieving Project: %s", err)
+	}
+
+	d.Set("id", project.ID)
+	d.Set("name", project.Name)
+	d.Set("created", project.Created)
+	d.Set("updated", project.Updated)
+
+	return nil
+}
+
+func resourcePacketProjectUpdate(d *schema.ResourceData, meta interface{}) error {
+	client := meta.(*packngo.Client)
+
+	updateRequest := &packngo.ProjectUpdateRequest{
+		ID:   d.Get("id").(string),
+		Name: d.Get("name").(string),
+	}
+
+	if attr, ok := d.GetOk("payment_method"); ok {
+		updateRequest.PaymentMethod = attr.(string)
+	}
+
+	log.Printf("[DEBUG] Project update: %#v", d.Get("id"))
+	_, _, err := client.Projects.Update(updateRequest)
+	if err != nil {
+		return fmt.Errorf("Failed to update Project: %s", err)
+	}
+
+	return resourcePacketProjectRead(d, meta)
+}
+
+func resourcePacketProjectDelete(d *schema.ResourceData, meta interface{}) error {
+	client := meta.(*packngo.Client)
+
+	log.Printf("[INFO] Deleting Project: %s", d.Id())
+	_, err := client.Projects.Delete(d.Id())
+	if err != nil {
+		return fmt.Errorf("Error deleting Project: %s", err)
+	}
+
+	d.SetId("")
+	return nil
+}
diff --git a/builtin/providers/packet/resource_packet_project_test.go b/builtin/providers/packet/resource_packet_project_test.go
new file mode 100644
index 000000000..b0179cfbe
--- /dev/null
+++ b/builtin/providers/packet/resource_packet_project_test.go
@@ -0,0 +1,95 @@
+package packet
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/terraform"
+	"github.com/packethost/packngo"
+)
+
+func TestAccPacketProject_Basic(t *testing.T) {
+	var project packngo.Project
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckPacketProjectDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccCheckPacketProjectConfig_basic,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckPacketProjectExists("packet_project.foobar", &project),
+					testAccCheckPacketProjectAttributes(&project),
+					resource.TestCheckResourceAttr(
+						"packet_project.foobar", "name", "foobar"),
+				),
+			},
+		},
+	})
+}
+
+func testAccCheckPacketProjectDestroy(s *terraform.State) error {
+	client := testAccProvider.Meta().(*packngo.Client)
+
+	for _, rs := range s.RootModule().Resources {
+		if rs.Type != "packet_project" {
+			continue
+		}
+
+		_, _, err := client.Projects.Get(rs.Primary.ID)
+
+		if err == nil {
+			return fmt.Errorf("Project still exists")
+		}
+	}
+
+	return nil
+}
+
+func testAccCheckPacketProjectAttributes(project *packngo.Project) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+
+		if project.Name != "foobar" {
+			return fmt.Errorf("Bad name: %s", project.Name)
+		}
+
+		return nil
+	}
+}
+
+func testAccCheckPacketProjectExists(n string, project *packngo.Project) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		rs, ok := s.RootModule().Resources[n]
+
+		if !ok {
+			return fmt.Errorf("Not found: %s", n)
+		}
+
+		if rs.Primary.ID == "" {
+			return fmt.Errorf("No Record ID is set")
+		}
+
+		client := testAccProvider.Meta().(*packngo.Client)
+
+		foundProject, _, err := client.Projects.Get(rs.Primary.ID)
+
+		if err != nil {
+			return err
+		}
+
+		if foundProject.ID != rs.Primary.ID {
+			return fmt.Errorf("Record not found: %v - %v", rs.Primary.ID, foundProject)
+		}
+
+		*project = *foundProject
+
+		return nil
+	}
+}
+
+var testAccCheckPacketProjectConfig_basic = `
+resource "packet_project" "foobar" {
+    name = "foobar"
+}`
diff --git a/builtin/providers/packet/resource_packet_ssh_key.go b/builtin/providers/packet/resource_packet_ssh_key.go
new file mode 100644
index 000000000..95e04bd8c
--- /dev/null
+++ b/builtin/providers/packet/resource_packet_ssh_key.go
@@ -0,0 +1,128 @@
+package packet
+
+import (
+	"fmt"
+	"log"
+	"strings"
+
+	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/packethost/packngo"
+)
+
+func resourcePacketSSHKey() *schema.Resource {
+	return &schema.Resource{
+		Create: resourcePacketSSHKeyCreate,
+		Read:   resourcePacketSSHKeyRead,
+		Update: resourcePacketSSHKeyUpdate,
+		Delete: resourcePacketSSHKeyDelete,
+
+		Schema: map[string]*schema.Schema{
+			"id": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"name": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+			},
+
+			"public_key": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"fingerprint": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"created": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"updated": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func resourcePacketSSHKeyCreate(d *schema.ResourceData, meta interface{}) error {
+	client := meta.(*packngo.Client)
+
+	createRequest := &packngo.SSHKeyCreateRequest{
+		Label: d.Get("name").(string),
+		Key:   d.Get("public_key").(string),
+	}
+
+	log.Printf("[DEBUG] SSH Key create configuration: %#v", createRequest)
+	key, _, err := client.SSHKeys.Create(createRequest)
+	if err != nil {
+		return fmt.Errorf("Error creating SSH Key: %s", err)
+	}
+
+	d.SetId(key.ID)
+	log.Printf("[INFO] SSH Key created: %s", key.ID)
+
+	return resourcePacketSSHKeyRead(d, meta)
+}
+
+func resourcePacketSSHKeyRead(d *schema.ResourceData, meta interface{}) error {
+	client := meta.(*packngo.Client)
+
+	key, _, err := client.SSHKeys.Get(d.Id())
+	if err != nil {
+		// If the key was somehow already destroyed, mark it as
+		// successfully gone
+		if strings.Contains(err.Error(), "404") {
+			d.SetId("")
+			return nil
+		}
+
+		return fmt.Errorf("Error retrieving SSH key: %s", err)
+	}
+
+	d.Set("id", key.ID)
+	d.Set("name", key.Label)
+	d.Set("public_key", key.Key)
+	d.Set("fingerprint", key.FingerPrint)
+	d.Set("created", key.Created)
+	d.Set("updated", key.Updated)
+
+	return nil
+}
+
+func resourcePacketSSHKeyUpdate(d *schema.ResourceData, meta interface{}) error {
+	client := meta.(*packngo.Client)
+
+	updateRequest := &packngo.SSHKeyUpdateRequest{
+		ID:    d.Get("id").(string),
+		Label: d.Get("name").(string),
+		Key:   d.Get("public_key").(string),
+	}
+
+	log.Printf("[DEBUG] SSH key update: %#v", d.Get("id"))
+	_, _, err := client.SSHKeys.Update(updateRequest)
+	if err != nil {
+		return fmt.Errorf("Failed to update SSH key: %s", err)
+	}
+
+	return resourcePacketSSHKeyRead(d, meta)
+}
+
+func resourcePacketSSHKeyDelete(d *schema.ResourceData, meta interface{}) error {
+	client := meta.(*packngo.Client)
+
+	log.Printf("[INFO] Deleting SSH key: %s", d.Id())
+	_, err := client.SSHKeys.Delete(d.Id())
+	if err != nil {
+		return fmt.Errorf("Error deleting SSH key: %s", err)
+	}
+
+	d.SetId("")
+	return nil
+}
diff --git a/builtin/providers/packet/resource_packet_ssh_key_test.go b/builtin/providers/packet/resource_packet_ssh_key_test.go
new file mode 100644
index 000000000..765086d4f
--- /dev/null
+++ b/builtin/providers/packet/resource_packet_ssh_key_test.go
@@ -0,0 +1,104 @@
+package packet
+
+import (
+	"fmt"
+	"strings"
+	"testing"
+
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/terraform"
+	"github.com/packethost/packngo"
+)
+
+func TestAccPacketSSHKey_Basic(t *testing.T) {
+	var key packngo.SSHKey
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckPacketSSHKeyDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccCheckPacketSSHKeyConfig_basic,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckPacketSSHKeyExists("packet_ssh_key.foobar", &key),
+					testAccCheckPacketSSHKeyAttributes(&key),
+					resource.TestCheckResourceAttr(
+						"packet_ssh_key.foobar", "name", "foobar"),
+					resource.TestCheckResourceAttr(
+						"packet_ssh_key.foobar", "public_key", testAccValidPublicKey),
+				),
+			},
+		},
+	})
+}
+
+func testAccCheckPacketSSHKeyDestroy(s *terraform.State) error {
+	client := testAccProvider.Meta().(*packngo.Client)
+
+	for _, rs := range s.RootModule().Resources {
+		if rs.Type != "packet_ssh_key" {
+			continue
+		}
+
+		_, _, err := client.SSHKeys.Get(rs.Primary.ID)
+
+		if err == nil {
+			return fmt.Errorf("SSH key still exists")
+		}
+	}
+
+	return nil
+}
+
+func testAccCheckPacketSSHKeyAttributes(key *packngo.SSHKey) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+
+		if key.Label != "foobar" {
+			return fmt.Errorf("Bad name: %s", key.Label)
+		}
+
+		return nil
+	}
+}
+
+func testAccCheckPacketSSHKeyExists(n string, key *packngo.SSHKey) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		rs, ok := s.RootModule().Resources[n]
+
+		if !ok {
+			return fmt.Errorf("Not found: %s", n)
+		}
+
+		if rs.Primary.ID == "" {
+			return fmt.Errorf("No Record ID is set")
+		}
+
+		client := testAccProvider.Meta().(*packngo.Client)
+
+		foundKey, _, err := client.SSHKeys.Get(rs.Primary.ID)
+
+		if err != nil {
+			return err
+		}
+
+		if foundKey.ID != rs.Primary.ID {
+			return fmt.Errorf("SSH key not found: %v - %v", rs.Primary.ID, foundKey)
+		}
+
+		*key = *foundKey
+
+		return nil
+	}
+}
+
+var testAccCheckPacketSSHKeyConfig_basic = fmt.Sprintf(`
+resource "packet_ssh_key" "foobar" {
+    name = "foobar"
+    public_key = "%s"
+}`, testAccValidPublicKey)
+
+var testAccValidPublicKey = strings.TrimSpace(`
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCKVmnMOlHKcZK8tpt3MP1lqOLAcqcJzhsvJcjscgVERRN7/9484SOBJ3HSKxxNG5JN8owAjy5f9yYwcUg+JaUVuytn5Pv3aeYROHGGg+5G346xaq3DAwX6Y5ykr2fvjObgncQBnuU5KHWCECO/4h8uWuwh/kfniXPVjFToc+gnkqA+3RKpAecZhFXwfalQ9mMuYGFxn+fwn8cYEApsJbsEmb0iJwPiZ5hjFC8wREuiTlhPHDgkBLOiycd20op2nXzDbHfCHInquEe/gYxEitALONxm0swBOwJZwlTDOB7C6y2dzlrtxr1L59m7pCkWI4EtTRLvleehBoj3u7jB4usR
+`)
diff --git a/website/source/docs/providers/packet/index.html.markdown b/website/source/docs/providers/packet/index.html.markdown
new file mode 100644
index 000000000..bbe9f5d1e
--- /dev/null
+++ b/website/source/docs/providers/packet/index.html.markdown
@@ -0,0 +1,47 @@
+---
+layout: "packet"
+page_title: "Provider: Packet"
+sidebar_current: "docs-packet-index"
+description: |-
+  The Packet provider is used to interact with the resources supported by Packet.
The provider needs to be configured with the proper credentials before it can be used. +--- + +# Packet Provider + +The Packet provider is used to interact with the resources supported by Packet. +The provider needs to be configured with the proper credentials before it can be used. + +Use the navigation to the left to read about the available resources. + +## Example Usage + +``` +# Configure the Packet Provider +provider "packet" { + auth_token = "${var.auth_token}" +} + +# Create a project +resource "packet_project" "tf_project_1" { + name = "My First Terraform Project" + payment_method = "PAYMENT_METHOD_ID" +} + +# Create a device and add it to tf_project_1 +resource "packet_device" "web1" { + hostname = "tf.coreos2" + plan = "baremetal_1" + facility = "ewr1" + operating_system = "coreos_stable" + billing_cycle = "hourly" + project_id = "${packet_project.tf_project_1.id}" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `auth_token` - (Required) This is your Packet API Auth token. This can also be specified + with the `PACKET_AUTH_TOKEN` shell environment variable. + diff --git a/website/source/docs/providers/packet/r/device.html.markdown b/website/source/docs/providers/packet/r/device.html.markdown new file mode 100644 index 000000000..6d57dcbb5 --- /dev/null +++ b/website/source/docs/providers/packet/r/device.html.markdown @@ -0,0 +1,55 @@ +--- +layout: "packet" +page_title: "Packet: packet_device" +sidebar_current: "docs-packet-resource-device" +description: |- + Provides a Packet device resource. This can be used to create, modify, and delete devices. +--- + +# packet\_device + +Provides a Packet device resource. This can be used to create, +modify, and delete devices. + +## Example Usage + +``` +# Create a device and add it to tf_project_1 +resource "packet_device" "web1" { + hostname = "tf.coreos2" + plan = "baremetal_1" + facility = "ewr1" + operating_system = "coreos_stable" + billing_cycle = "hourly" + project_id = "${packet_project.tf_project_1.id}" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `hostname` - (Required) The device name +* `project_id` - (Required) The id of the project in which to create the device +* `operating_system` - (Required) The operating system slug +* `facility` - (Required) The facility in which to create the device +* `plan` - (Required) The config type slug +* `billing_cycle` - (Required) monthly or hourly +* `user_data` (Optional) - A string of the desired User Data for the device. 
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The ID of the device
+* `hostname` - The hostname of the device
+* `project_id` - The ID of the project the device belongs to
+* `facility` - The facility the device is in
+* `plan` - The config type of the device
+* `network` - The private and public v4 and v6 IPs assigned to the device
+* `locked` - Whether the device is locked
+* `billing_cycle` - The billing cycle of the device (monthly or hourly)
+* `operating_system` - The operating system running on the device
+* `state` - The status of the device
+* `created` - The timestamp for when the device was created
+* `updated` - The timestamp for the last time the device was updated
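+
+## Example with user data
+
+A sketch of passing user data at create time (this assumes the chosen
+operating system image processes cloud-config; the script content is
+illustrative only):
+
+```
+resource "packet_device" "web2" {
+    hostname         = "tf.coreos2-user-data"
+    plan             = "baremetal_1"
+    facility         = "ewr1"
+    operating_system = "coreos_stable"
+    billing_cycle    = "hourly"
+    project_id       = "${packet_project.tf_project_1.id}"
+    user_data        = <<EOF
+#cloud-config
+runcmd:
+  - echo "provisioned by Terraform"
+EOF
+}
+```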
diff --git a/website/source/docs/providers/packet/r/project.html.markdown b/website/source/docs/providers/packet/r/project.html.markdown
new file mode 100644
index 000000000..d17190eec
--- /dev/null
+++ b/website/source/docs/providers/packet/r/project.html.markdown
@@ -0,0 +1,40 @@
+---
+layout: "packet"
+page_title: "Packet: packet_project"
+sidebar_current: "docs-packet-resource-project"
+description: |-
+  Provides a Packet Project resource.
+---
+
+# packet\_project
+
+Provides a Packet Project resource to allow you to manage devices
+in your projects.
+
+## Example Usage
+
+```
+# Create a new Project
+resource "packet_project" "tf_project_1" {
+    name = "Terraform Fun"
+    payment_method = "payment-method-id"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the project
+* `payment_method` - (Optional) The ID of the payment method on file to use for services created
+on this project.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The unique ID of the project
+* `payment_method` - The ID of the payment method on file to use for services created
+on this project.
+* `created` - The timestamp for when the project was created
+* `updated` - The timestamp for the last time the project was updated
diff --git a/website/source/docs/providers/packet/r/ssh_key.html.markdown b/website/source/docs/providers/packet/r/ssh_key.html.markdown
new file mode 100644
index 000000000..cb27aaa77
--- /dev/null
+++ b/website/source/docs/providers/packet/r/ssh_key.html.markdown
@@ -0,0 +1,43 @@
+---
+layout: "packet"
+page_title: "Packet: packet_ssh_key"
+sidebar_current: "docs-packet-resource-ssh-key"
+description: |-
+  Provides a Packet SSH key resource.
+---
+
+# packet\_ssh_key
+
+Provides a Packet SSH key resource to allow you to manage SSH
+keys on your account. All SSH keys on your account are loaded onto
+all new devices; they do not have to be explicitly declared on
+device creation.
+
+## Example Usage
+
+```
+# Create a new SSH key
+resource "packet_ssh_key" "key1" {
+    name = "terraform-1"
+    public_key = "${file("/home/terraform/.ssh/id_rsa.pub")}"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the SSH key for identification
+* `public_key` - (Required) The public key. If this is a file, it
+can be read using the file interpolation function.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The unique ID of the key
+* `name` - The name of the SSH key
+* `public_key` - The text of the public key
+* `fingerprint` - The fingerprint of the SSH key
+* `created` - The timestamp for when the SSH key was created
+* `updated` - The timestamp for the last time the SSH key was updated
From 09e336a80a6afac5a8c998a704c59db54d87259f Mon Sep 17 00:00:00 2001
From: Matti Savolainen
Date: Fri, 3 Jul 2015 12:58:05 +0300
Subject: [PATCH 003/220] Fix Repository attribute in docker client PullOptions
 for private registries.

---
 .../docker/resource_docker_image_funcs.go     |  4 +--
 .../docker/resource_docker_image_test.go      | 28 +++++++++++++++++--
 2 files changed, 28 insertions(+), 4 deletions(-)

diff --git a/builtin/providers/docker/resource_docker_image_funcs.go b/builtin/providers/docker/resource_docker_image_funcs.go
index f45dd2226..454113c5f 100644
--- a/builtin/providers/docker/resource_docker_image_funcs.go
+++ b/builtin/providers/docker/resource_docker_image_funcs.go
@@ -83,7 +83,7 @@ func pullImage(data *Data, client *dc.Client, image string) error {
 		splitPortRepo := strings.Split(splitImageName[1], "/")
 		pullOpts.Registry = splitImageName[0] + ":" + splitPortRepo[0]
 		pullOpts.Tag = splitImageName[2]
-		pullOpts.Repository = strings.Join(splitPortRepo[1:], "/")
+		pullOpts.Repository = pullOpts.Registry + "/" + strings.Join(splitPortRepo[1:], "/")
 
 	// It's either registry:port/username/repo, registry:port/repo,
 	// or repo:tag with default registry
@@ -98,7 +98,7 @@ func pullImage(data *Data, client *dc.Client, image string) error {
 		// registry:port/username/repo or registry:port/repo
 		default:
 			pullOpts.Registry = splitImageName[0] + ":" + splitPortRepo[0]
-			pullOpts.Repository = strings.Join(splitPortRepo[1:], "/")
+			pullOpts.Repository = pullOpts.Registry + "/" + strings.Join(splitPortRepo[1:], "/")
 			pullOpts.Tag = "latest"
 		}
diff --git a/builtin/providers/docker/resource_docker_image_test.go b/builtin/providers/docker/resource_docker_image_test.go
index 14dfb29b7..844b56329 100644
--- a/builtin/providers/docker/resource_docker_image_test.go
+++ b/builtin/providers/docker/resource_docker_image_test.go
@@ -1,9 +1,8 @@
 package docker
 
 import (
-	"testing"
-
 	"github.com/hashicorp/terraform/helper/resource"
+	"testing"
 )
 
 func TestAccDockerImage_basic(t *testing.T) {
@@ -24,9 +23,34 @@ func TestAccDockerImage_basic(t *testing.T) {
 	})
 }
 
+func TestAccDockerImage_private(t *testing.T) {
+	resource.Test(t, resource.TestCase{
+		PreCheck:  func() { testAccPreCheck(t) },
+		Providers: testAccProviders,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccDockerPrivateImageConfig,
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr(
+						"docker_image.foobar",
+						"latest",
+						"2c40b0526b6358710fd09e7b8c022429268cc61703b4777e528ac9d469a07ca1"),
+				),
+			},
+		},
+	})
+}
+
 const testAccDockerImageConfig = `
 resource "docker_image" "foo" {
 	name = "ubuntu:trusty-20150320"
 	keep_updated = true
 }
 `
+
+const testAccDockerPrivateImageConfig = `
+resource "docker_image" "foobar" {
+	name = "gcr.io:443/google_containers/pause:0.8.0"
+	keep_updated = true
+}
+`
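For reference, a minimal configuration exercising the private-registry path
fixed above (a sketch: `registry.example.com:5000` is a hypothetical host
standing in for any registry the local Docker daemon can already pull from):

```
resource "docker_image" "private" {
    name = "registry.example.com:5000/team/app:1.0"
    keep_updated = true
}
```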
From f6d69164e84a0e1efd396025f79e3743d28a0034 Mon Sep 17 00:00:00 2001
From: Christian Berendt
Date: Thu, 27 Aug 2015 17:13:59 +0200
Subject: [PATCH 004/220] examples: add OpenStack configuration with networking

---
 examples/openstack-with-networking/README.md  | 63 +++++++++++++++
 examples/openstack-with-networking/main.tf    | 79 +++++++++++++++++++
 .../openstack-with-networking/openrc.sample   |  7 ++
 examples/openstack-with-networking/outputs.tf |  3 +
 .../openstack-with-networking/variables.tf    | 22 ++++++
 5 files changed, 174 insertions(+)
 create mode 100644 examples/openstack-with-networking/README.md
 create mode 100644 examples/openstack-with-networking/main.tf
 create mode 100644 examples/openstack-with-networking/openrc.sample
 create mode 100644 examples/openstack-with-networking/outputs.tf
 create mode 100644 examples/openstack-with-networking/variables.tf

diff --git a/examples/openstack-with-networking/README.md b/examples/openstack-with-networking/README.md
new file mode 100644
index 000000000..2f9d381ca
--- /dev/null
+++ b/examples/openstack-with-networking/README.md
@@ -0,0 +1,63 @@
+# Basic OpenStack architecture with networking
+
+This provides a template for running a simple architecture on an OpenStack
+cloud.
+
+To simplify the example, this intentionally ignores deploying and
+getting your application onto the servers. However, you could do so either via
+[provisioners](https://www.terraform.io/docs/provisioners/) and a configuration
+management tool, or by pre-baking configured images with
+[Packer](http://www.packer.io).
+
+After you run `terraform apply` on this configuration, it will output the
+floating IP address assigned to the instance. Once the instance has started,
+it should respond with the default nginx web page.
+
+First, set the required environment variables for the OpenStack provider by
+sourcing the [credentials file](http://docs.openstack.org/cli-reference/content/cli_openrc.html).
+
+```
+source openrc
+```
+
+Afterwards, run with a command like this:
+
+```
+terraform apply \
+    -var 'external_gateway=c1901f39-f76e-498a-9547-c29ba45f64df' \
+    -var 'pool=public'
+```
+
+To get a list of usable floating IP pools, run this command:
+
+```
+$ nova floating-ip-pool-list
++--------+
+| name   |
++--------+
+| public |
++--------+
+```
+
+To get the UUID of the external gateway, run this command:
+
+```
+$ neutron net-show FLOATING_IP_POOL
++---------------------------+--------------------------------------+
+| Field                     | Value                                |
++---------------------------+--------------------------------------+
+| admin_state_up            | True                                 |
+| id                        | c1901f39-f76e-498a-9547-c29ba45f64df |
+| mtu                       | 0                                    |
+| name                      | public                               |
+| port_security_enabled     | True                                 |
+| provider:network_type     | vxlan                                |
+| provider:physical_network |                                      |
+| provider:segmentation_id  | 1092                                 |
+| router:external           | True                                 |
+| shared                    | False                                |
+| status                    | ACTIVE                               |
+| subnets                   | 42b672ae-8d51-4a18-a028-ddae7859ec4c |
+| tenant_id                 | 1bde0a49d2ff44ffb44e6339a8cefe3a     |
++---------------------------+--------------------------------------+
+```
diff --git a/examples/openstack-with-networking/main.tf b/examples/openstack-with-networking/main.tf
new file mode 100644
index 000000000..d57925263
--- /dev/null
+++ b/examples/openstack-with-networking/main.tf
@@ -0,0 +1,79 @@
+resource "openstack_compute_keypair_v2" "terraform" {
+  name = "terraform"
+  public_key = "${file("${var.ssh_key_file}.pub")}"
+}
+
+resource "openstack_networking_network_v2" "terraform" {
+  name = "terraform"
+  admin_state_up = "true"
+}
+
+resource "openstack_networking_subnet_v2" "terraform" {
+  name = "terraform"
+  network_id = "${openstack_networking_network_v2.terraform.id}"
+  cidr = "10.0.0.0/24"
+  ip_version = 4
+  dns_nameservers = ["8.8.8.8","8.8.4.4"]
+}
+
+resource "openstack_networking_router_v2" "terraform" {
+  name = "terraform"
+  
admin_state_up = "true" + external_gateway = "${var.external_gateway}" +} + +resource "openstack_networking_router_interface_v2" "terraform" { + router_id = "${openstack_networking_router_v2.terraform.id}" + subnet_id = "${openstack_networking_subnet_v2.terraform.id}" +} + +resource "openstack_compute_secgroup_v2" "terraform" { + name = "terraform" + description = "Security group for the Terraform example instances" + rule { + from_port = 22 + to_port = 22 + ip_protocol = "tcp" + cidr = "0.0.0.0/0" + } + rule { + from_port = 80 + to_port = 80 + ip_protocol = "tcp" + cidr = "0.0.0.0/0" + } + rule { + from_port = -1 + to_port = -1 + ip_protocol = "icmp" + cidr = "0.0.0.0/0" + } +} + +resource "openstack_compute_floatingip_v2" "terraform" { + pool = "${var.pool}" + depends_on = ["openstack_networking_router_interface_v2.terraform"] +} + +resource "openstack_compute_instance_v2" "terraform" { + name = "terraform" + image_name = "${var.image}" + flavor_name = "${var.flavor}" + key_pair = "${openstack_compute_keypair_v2.terraform.name}" + security_groups = [ "${openstack_compute_secgroup_v2.terraform.name}" ] + floating_ip = "${openstack_compute_floatingip_v2.terraform.address}" + network { + uuid = "${openstack_networking_network_v2.terraform.id}" + } + provisioner "remote-exec" { + connection { + user = "${var.ssh_user_name}" + key_file = "${var.ssh_key_file}" + } + inline = [ + "sudo apt-get -y update", + "sudo apt-get -y install nginx", + "sudo service nginx start" + ] + } +} diff --git a/examples/openstack-with-networking/openrc.sample b/examples/openstack-with-networking/openrc.sample new file mode 100644 index 000000000..c9a38e0a1 --- /dev/null +++ b/examples/openstack-with-networking/openrc.sample @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +export OS_AUTH_URL=http://KEYSTONE.ENDPOINT.URL:5000/v2.0 +export OS_TENANT_NAME=YOUR_TENANT_NAME +export OS_USERNAME=YOUR_USERNAME +export OS_PASSWORD=YOUR_PASSWORD +export OS_REGION_NAME=YOUR_REGION_NAME diff --git a/examples/openstack-with-networking/outputs.tf b/examples/openstack-with-networking/outputs.tf new file mode 100644 index 000000000..42f923fe2 --- /dev/null +++ b/examples/openstack-with-networking/outputs.tf @@ -0,0 +1,3 @@ +output "address" { + value = "${openstack_compute_floatingip_v2.terraform.address}" +} diff --git a/examples/openstack-with-networking/variables.tf b/examples/openstack-with-networking/variables.tf new file mode 100644 index 000000000..3477cf67e --- /dev/null +++ b/examples/openstack-with-networking/variables.tf @@ -0,0 +1,22 @@ +variable "image" { + default = "Ubuntu 14.04" +} + +variable "flavor" { + default = "m1.small" +} + +variable "ssh_key_file" { + default = "~/.ssh/id_rsa.terraform" +} + +variable "ssh_user_name" { + default = "ubuntu" +} + +variable "external_gateway" { +} + +variable "pool" { + default = "public" +} From 72e421942e3bda361362b6f349e816566d0c8ef7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marcello=20Lagan=C3=A0?= Date: Tue, 1 Sep 2015 17:21:49 +0200 Subject: [PATCH 005/220] Support tags for aws_db_subnet_group --- .../aws/resource_aws_db_subnet_group.go | 26 +++++++++++++++++++ .../aws/resource_aws_db_subnet_group_test.go | 3 +++ .../aws/r/db_subnet_group.html.markdown | 4 +++ 3 files changed, 33 insertions(+) diff --git a/builtin/providers/aws/resource_aws_db_subnet_group.go b/builtin/providers/aws/resource_aws_db_subnet_group.go index 9c09b72d7..709809c4a 100644 --- a/builtin/providers/aws/resource_aws_db_subnet_group.go +++ b/builtin/providers/aws/resource_aws_db_subnet_group.go @@ -56,12 
+56,15 @@ func resourceAwsDbSubnetGroup() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, + + "tags": tagsSchema(), }, } } func resourceAwsDbSubnetGroupCreate(d *schema.ResourceData, meta interface{}) error { rdsconn := meta.(*AWSClient).rdsconn + tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{})) subnetIdsSet := d.Get("subnet_ids").(*schema.Set) subnetIds := make([]*string, subnetIdsSet.Len()) @@ -73,6 +76,7 @@ func resourceAwsDbSubnetGroupCreate(d *schema.ResourceData, meta interface{}) er DBSubnetGroupName: aws.String(d.Get("name").(string)), DBSubnetGroupDescription: aws.String(d.Get("description").(string)), SubnetIds: subnetIds, + Tags: tags, } log.Printf("[DEBUG] Create DB Subnet Group: %#v", createOpts) @@ -130,6 +134,28 @@ func resourceAwsDbSubnetGroupRead(d *schema.ResourceData, meta interface{}) erro } d.Set("subnet_ids", subnets) + // list tags for resource + // set tags + conn := meta.(*AWSClient).rdsconn + arn, err := buildRDSARN(d, meta) + if err != nil { + log.Printf("[DEBUG] Error building ARN for DB Subnet Group, not setting Tags for group %s", subnetGroup.DBSubnetGroupName) + } else { + resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{ + ResourceName: aws.String(arn), + }) + + if err != nil { + log.Printf("[DEBUG] Error retreiving tags for ARN: %s", arn) + } + + var dt []*rds.Tag + if len(resp.TagList) > 0 { + dt = resp.TagList + } + d.Set("tags", tagsToMapRDS(dt)) + } + return nil } diff --git a/builtin/providers/aws/resource_aws_db_subnet_group_test.go b/builtin/providers/aws/resource_aws_db_subnet_group_test.go index cbf1f8497..e189b1e21 100644 --- a/builtin/providers/aws/resource_aws_db_subnet_group_test.go +++ b/builtin/providers/aws/resource_aws_db_subnet_group_test.go @@ -150,6 +150,9 @@ resource "aws_db_subnet_group" "foo" { name = "FOO" description = "foo description" subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"] + tags { + Name = "tf-dbsubnet-group-test" + } } ` diff --git a/website/source/docs/providers/aws/r/db_subnet_group.html.markdown b/website/source/docs/providers/aws/r/db_subnet_group.html.markdown index 2937b54e7..e3dcd18ed 100644 --- a/website/source/docs/providers/aws/r/db_subnet_group.html.markdown +++ b/website/source/docs/providers/aws/r/db_subnet_group.html.markdown @@ -17,6 +17,9 @@ resource "aws_db_subnet_group" "default" { name = "main" description = "Our main group of subnets" subnet_ids = ["${aws_subnet.frontend.id}", "${aws_subnet.backend.id}"] + tags { + Name = "My DB subnet group" + } } ``` @@ -27,6 +30,7 @@ The following arguments are supported: * `name` - (Required) The name of the DB subnet group. * `description` - (Required) The description of the DB subnet group. * `subnet_ids` - (Required) A list of VPC subnet IDs. +* `tags` - (Optional) A mapping of tags to assign to the resource. 
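+
+Tag values can be interpolated like any other attribute. A sketch (this
+assumes a `var.environment` variable is defined elsewhere in the
+configuration):
+
+```
+resource "aws_db_subnet_group" "tagged" {
+    name = "tagged"
+    description = "Subnet group with interpolated tags"
+    subnet_ids = ["${aws_subnet.frontend.id}", "${aws_subnet.backend.id}"]
+    tags {
+        Name = "My DB subnet group"
+        Environment = "${var.environment}"
+    }
+}
+```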
## Attributes Reference From d9c4afce216cad4e9c1a89a11c8ae01deab597b8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marcello=20Lagan=C3=A0?= Date: Tue, 1 Sep 2015 17:38:51 +0200 Subject: [PATCH 006/220] Modify tags on update and fix tests --- builtin/providers/aws/resource_aws_db_subnet_group.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/builtin/providers/aws/resource_aws_db_subnet_group.go b/builtin/providers/aws/resource_aws_db_subnet_group.go index 709809c4a..3de717e66 100644 --- a/builtin/providers/aws/resource_aws_db_subnet_group.go +++ b/builtin/providers/aws/resource_aws_db_subnet_group.go @@ -139,7 +139,7 @@ func resourceAwsDbSubnetGroupRead(d *schema.ResourceData, meta interface{}) erro conn := meta.(*AWSClient).rdsconn arn, err := buildRDSARN(d, meta) if err != nil { - log.Printf("[DEBUG] Error building ARN for DB Subnet Group, not setting Tags for group %s", subnetGroup.DBSubnetGroupName) + log.Printf("[DEBUG] Error building ARN for DB Subnet Group, not setting Tags for group %s", *subnetGroup.DBSubnetGroupName) } else { resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{ ResourceName: aws.String(arn), @@ -182,6 +182,15 @@ func resourceAwsDbSubnetGroupUpdate(d *schema.ResourceData, meta interface{}) er return err } } + + if arn, err := buildRDSARN(d, meta); err == nil { + if err := setTagsRDS(conn, d, arn); err != nil { + return err + } else { + d.SetPartial("tags") + } + } + return resourceAwsDbSubnetGroupRead(d, meta) } From 98808cb9b8f7f1126aebe9d1e1da715ec3ef1224 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marcello=20Lagan=C3=A0?= Date: Wed, 2 Sep 2015 09:24:34 +0200 Subject: [PATCH 007/220] Build RDS subgrp ARN --- .../aws/resource_aws_db_subnet_group.go | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/builtin/providers/aws/resource_aws_db_subnet_group.go b/builtin/providers/aws/resource_aws_db_subnet_group.go index 3de717e66..e6b17ea1f 100644 --- a/builtin/providers/aws/resource_aws_db_subnet_group.go +++ b/builtin/providers/aws/resource_aws_db_subnet_group.go @@ -9,6 +9,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/rds" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" @@ -137,7 +138,7 @@ func resourceAwsDbSubnetGroupRead(d *schema.ResourceData, meta interface{}) erro // list tags for resource // set tags conn := meta.(*AWSClient).rdsconn - arn, err := buildRDSARN(d, meta) + arn, err := buildRDSsubgrpARN(d, meta) if err != nil { log.Printf("[DEBUG] Error building ARN for DB Subnet Group, not setting Tags for group %s", *subnetGroup.DBSubnetGroupName) } else { @@ -183,7 +184,7 @@ func resourceAwsDbSubnetGroupUpdate(d *schema.ResourceData, meta interface{}) er } } - if arn, err := buildRDSARN(d, meta); err == nil { + if arn, err := buildRDSsubgrpARN(d, meta); err == nil { if err := setTagsRDS(conn, d, arn); err != nil { return err } else { @@ -231,3 +232,17 @@ func resourceAwsDbSubnetGroupDeleteRefreshFunc( return d, "destroyed", nil } } + +func buildRDSsubgrpARN(d *schema.ResourceData, meta interface{}) (string, error) { + iamconn := meta.(*AWSClient).iamconn + region := meta.(*AWSClient).region + // An zero value GetUserInput{} defers to the currently logged in user + resp, err := iamconn.GetUser(&iam.GetUserInput{}) + if err != nil { + return "", err + } + userARN := *resp.User.Arn + accountID := strings.Split(userARN, ":")[4] + 
arn := fmt.Sprintf("arn:aws:rds:%s:%s:subgrp:%s", region, accountID, d.Id()) + return arn, nil +} From 5001bb078e06566d2f9e7dd438aaafa103a6c8d7 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 2 Sep 2015 14:44:12 +0100 Subject: [PATCH 008/220] provider/aws: Add new resource - aws_iam_saml_provider --- builtin/providers/aws/provider.go | 1 + .../aws/resource_aws_iam_saml_provider.go | 101 ++++++++++++++++++ 2 files changed, 102 insertions(+) create mode 100644 builtin/providers/aws/resource_aws_iam_saml_provider.go diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index a5029b400..9a00edffc 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -187,6 +187,7 @@ func Provider() terraform.ResourceProvider { "aws_iam_policy_attachment": resourceAwsIamPolicyAttachment(), "aws_iam_role_policy": resourceAwsIamRolePolicy(), "aws_iam_role": resourceAwsIamRole(), + "aws_iam_saml_provider": resourceAwsIamSamlProvider(), "aws_iam_server_certificate": resourceAwsIAMServerCertificate(), "aws_iam_user_policy": resourceAwsIamUserPolicy(), "aws_iam_user": resourceAwsIamUser(), diff --git a/builtin/providers/aws/resource_aws_iam_saml_provider.go b/builtin/providers/aws/resource_aws_iam_saml_provider.go new file mode 100644 index 000000000..6a166d711 --- /dev/null +++ b/builtin/providers/aws/resource_aws_iam_saml_provider.go @@ -0,0 +1,101 @@ +package aws + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/iam" + + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsIamSamlProvider() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsIamSamlProviderCreate, + Read: resourceAwsIamSamlProviderRead, + Update: resourceAwsIamSamlProviderUpdate, + Delete: resourceAwsIamSamlProviderDelete, + + Schema: map[string]*schema.Schema{ + "arn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "valid_until": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "saml_metadata_document": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func resourceAwsIamSamlProviderCreate(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + input := &iam.CreateSAMLProviderInput{ + Name: aws.String(d.Get("name").(string)), + SAMLMetadataDocument: aws.String(d.Get("saml_metadata_document").(string)), + } + + out, err := iamconn.CreateSAMLProvider(input) + if err != nil { + return err + } + + d.SetId(*out.SAMLProviderArn) + + return resourceAwsIamSamlProviderRead(d, meta) +} + +func resourceAwsIamSamlProviderRead(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + input := &iam.GetSAMLProviderInput{ + SAMLProviderArn: aws.String(d.Id()), + } + out, err := iamconn.GetSAMLProvider(input) + if err != nil { + return err + } + + validUntil := out.ValidUntil.Format(time.RFC1123) + d.Set("valid_until", validUntil) + d.Set("saml_metadata_document", *out.SAMLMetadataDocument) + + return nil +} + +func resourceAwsIamSamlProviderUpdate(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + input := &iam.UpdateSAMLProviderInput{ + SAMLProviderArn: aws.String(d.Id()), + SAMLMetadataDocument: aws.String(d.Get("saml_metadata_document").(string)), + } + _, err := iamconn.UpdateSAMLProvider(input) + if err != nil { + return err + } + + return 
resourceAwsIamSamlProviderRead(d, meta)
+}
+
+func resourceAwsIamSamlProviderDelete(d *schema.ResourceData, meta interface{}) error {
+	iamconn := meta.(*AWSClient).iamconn
+
+	input := &iam.DeleteSAMLProviderInput{
+		SAMLProviderArn: aws.String(d.Id()),
+	}
+	_, err := iamconn.DeleteSAMLProvider(input)
+
+	return err
+}
From ac762e5503b1c0661329efec543614a3a0fe34a3 Mon Sep 17 00:00:00 2001
From: Radek Simko
Date: Wed, 2 Sep 2015 20:01:36 +0100
Subject: [PATCH 009/220] provider/aws: Add docs for aws_iam_saml_provider

---
 .../aws/r/iam_saml_provider.html.markdown     | 34 +++++++++++++++++++
 website/source/layouts/aws.erb                |  4 +++
 2 files changed, 38 insertions(+)
 create mode 100644 website/source/docs/providers/aws/r/iam_saml_provider.html.markdown

diff --git a/website/source/docs/providers/aws/r/iam_saml_provider.html.markdown b/website/source/docs/providers/aws/r/iam_saml_provider.html.markdown
new file mode 100644
index 000000000..adba6d350
--- /dev/null
+++ b/website/source/docs/providers/aws/r/iam_saml_provider.html.markdown
@@ -0,0 +1,34 @@
+---
+layout: "aws"
+page_title: "AWS: aws_iam_saml_provider"
+sidebar_current: "docs-aws-resource-iam-saml-provider"
+description: |-
+  Provides an IAM SAML provider.
+---
+
+# aws\_iam\_saml\_provider
+
+Provides an IAM SAML provider.
+
+## Example Usage
+
+```
+resource "aws_iam_saml_provider" "default" {
+    name = "myprovider"
+    saml_metadata_document = "${file("saml-metadata.xml")}"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the provider to create.
+* `saml_metadata_document` - (Required) An XML document generated by an identity provider that supports SAML 2.0.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `arn` - The ARN assigned by AWS for this provider.
+* `valid_until` - The expiration date and time for the SAML provider in RFC1123 format, e.g. `Mon, 02 Jan 2006 15:04:05 MST`.
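+
+## Example: Referencing the provider from a role
+
+A SAML provider is typically referenced from an IAM role trust policy so that
+federated users can call `sts:AssumeRoleWithSAML`. A minimal sketch (the trust
+policy JSON follows the standard AWS format; adjust the role to your needs):
+
+```
+resource "aws_iam_role" "saml_federated" {
+    name = "saml-federated-role"
+    assume_role_policy = <<EOF
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "${aws_iam_saml_provider.default.arn}"
+      },
+      "Action": "sts:AssumeRoleWithSAML"
+    }
+  ]
+}
+EOF
+}
+```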
diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb index 2bbff22f4..e07992b84 100644 --- a/website/source/layouts/aws.erb +++ b/website/source/layouts/aws.erb @@ -179,6 +179,10 @@ aws_iam_role_policy + > + aws_iam_saml_provider + + > aws_iam_server_certificate From 5d215c42db6aef7b7cf86bc3dee37c41fa8327a1 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 2 Sep 2015 20:02:00 +0100 Subject: [PATCH 010/220] provider/aws: Add acceptance test for aws_iam_saml_provider --- .../resource_aws_iam_saml_provider_test.go | 79 +++++++++++++++++++ .../test-fixtures/saml-metadata-modified.xml | 14 ++++ .../aws/test-fixtures/saml-metadata.xml | 14 ++++ 3 files changed, 107 insertions(+) create mode 100644 builtin/providers/aws/resource_aws_iam_saml_provider_test.go create mode 100644 builtin/providers/aws/test-fixtures/saml-metadata-modified.xml create mode 100644 builtin/providers/aws/test-fixtures/saml-metadata.xml diff --git a/builtin/providers/aws/resource_aws_iam_saml_provider_test.go b/builtin/providers/aws/resource_aws_iam_saml_provider_test.go new file mode 100644 index 000000000..63ed39588 --- /dev/null +++ b/builtin/providers/aws/resource_aws_iam_saml_provider_test.go @@ -0,0 +1,79 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSIAMSamlProvider_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckIAMSamlProviderDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccIAMSamlProviderConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckIAMSamlProvider("aws_iam_saml_provider.salesforce"), + ), + }, + resource.TestStep{ + Config: testAccIAMSamlProviderConfigUpdate, + Check: resource.ComposeTestCheckFunc( + testAccCheckIAMSamlProvider("aws_iam_saml_provider.salesforce"), + ), + }, + }, + }) +} + +func testAccCheckIAMSamlProviderDestroy(s *terraform.State) error { + if len(s.RootModule().Resources) > 0 { + return fmt.Errorf("Expected all resources to be gone, but found: %#v", s.RootModule().Resources) + } + + return nil +} + +func testAccCheckIAMSamlProvider(id string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[id] + if !ok { + return fmt.Errorf("Not Found: %s", id) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + iamconn := testAccProvider.Meta().(*AWSClient).iamconn + _, err := iamconn.GetSAMLProvider(&iam.GetSAMLProviderInput{ + SAMLProviderArn: aws.String(rs.Primary.ID), + }) + + if err != nil { + return err + } + + return nil + } +} + +const testAccIAMSamlProviderConfig = ` +resource "aws_iam_saml_provider" "salesforce" { + name = "tf-salesforce-test" + saml_metadata_document = "${file("./test-fixtures/saml-metadata.xml")}" +} +` + +const testAccIAMSamlProviderConfigUpdate = ` +resource "aws_iam_saml_provider" "salesforce" { + name = "tf-salesforce-test" + saml_metadata_document = "${file("./test-fixtures/saml-metadata-modified.xml")}" +} +` diff --git a/builtin/providers/aws/test-fixtures/saml-metadata-modified.xml b/builtin/providers/aws/test-fixtures/saml-metadata-modified.xml new file mode 100644 index 000000000..aaca7afc0 --- /dev/null +++ b/builtin/providers/aws/test-fixtures/saml-metadata-modified.xml @@ -0,0 +1,14 @@ + + + + + + 
MIIErDCCA5SgAwIBAgIOAU+PT8RBAAAAAHxJXEcwDQYJKoZIhvcNAQELBQAwgZAxKDAmBgNVBAMMH1NlbGZTaWduZWRDZXJ0XzAyU2VwMjAxNV8xODI2NTMxGDAWBgNVBAsMDzAwRDI0MDAwMDAwcEFvQTEXMBUGA1UECgwOU2FsZXNmb3JjZS5jb20xFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xCzAJBgNVBAgMAkNBMQwwCgYDVQQGEwNVU0EwHhcNMTUwOTAyMTgyNjUzWhcNMTcwOTAyMTIwMDAwWjCBkDEoMCYGA1UEAwwfU2VsZlNpZ25lZENlcnRfMDJTZXAyMDE1XzE4MjY1MzEYMBYGA1UECwwPMDBEMjQwMDAwMDBwQW9BMRcwFQYDVQQKDA5TYWxlc2ZvcmNlLmNvbTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzELMAkGA1UECAwCQ0ExDDAKBgNVBAYTA1VTQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJp/wTRr9n1IWJpkRTjNpep47OKJrD2E6rGbJ18TG2RxtIz+zCn2JwH2aP3TULh0r0hhcg/pecv51RRcG7O19DBBaTQ5+KuoICQyKZy07/yDXSiZontTwkEYs06ssTwTHUcRXbcwTKv16L7omt0MjIhTTGfvtLOYiPwyvKvzAHg4eNuAcli0duVM78UIBORtdmy9C9ZcMh8yRJo5aPBq85wsE3JXU58ytyZzCHTBLH+2xFQrjYnUSEW+FOEEpI7o33MVdFBvWWg1R17HkWzcve4C30lqOHqvxBzyESZ/N1mMlmSt8gPFyB+mUXY99StJDJpnytbY8DwSzMQUo/sOVB0CAwEAAaOCAQAwgf0wHQYDVR0OBBYEFByu1EQqRQS0bYQBKS9K5qwKi+6IMA8GA1UdEwEB/wQFMAMBAf8wgcoGA1UdIwSBwjCBv4AUHK7URCpFBLRthAEpL0rmrAqL7oihgZakgZMwgZAxKDAmBgNVBAMMH1NlbGZTaWduZWRDZXJ0XzAyU2VwMjAxNV8xODI2NTMxGDAWBgNVBAsMDzAwRDI0MDAwMDAwcEFvQTEXMBUGA1UECgwOU2FsZXNmb3JjZS5jb20xFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xCzAJBgNVBAgMAkNBMQwwCgYDVQQGEwNVU0GCDgFPj0/EQQAAAAB8SVxHMA0GCSqGSIb3DQEBCwUAA4IBAQA9O5o1tC71qJnkq+ABPo4A1aFKZVT/07GcBX4/wetcbYySL4Q2nR9pMgfPYYS1j+P2E3viPsQwPIWDUBwFkNsjjX5DSGEkLAioVGKRwJshRSCSynMcsVZbQkfBUiZXqhM0wzvoa/ALvGD+aSSb1m+x7lEpDYNwQKWaUW2VYcHWv9wjujMyy7dlj8E/jqM71mw7ThNl6k4+3RQ802dMa14txm8pkF0vZgfpV3tkqhBqtjBAicVCaveqr3r3iGqjvyilBgdY+0NR8szqzm7CD/Bkb22+/IgM/mXQuL9KHD/WADlSGmYKmG3SSahmcZxznYCnzcRNN9LVuXlz5cbljmBj + + + + urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified + + + + diff --git a/builtin/providers/aws/test-fixtures/saml-metadata.xml b/builtin/providers/aws/test-fixtures/saml-metadata.xml new file mode 100644 index 000000000..69e353b77 --- /dev/null +++ b/builtin/providers/aws/test-fixtures/saml-metadata.xml @@ -0,0 +1,14 @@ + + + + + + MIIErDCCA5SgAwIBAgIOAU+PT8RBAAAAAHxJXEcwDQYJKoZIhvcNAQELBQAwgZAxKDAmBgNVBAMMH1NlbGZTaWduZWRDZXJ0XzAyU2VwMjAxNV8xODI2NTMxGDAWBgNVBAsMDzAwRDI0MDAwMDAwcEFvQTEXMBUGA1UECgwOU2FsZXNmb3JjZS5jb20xFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xCzAJBgNVBAgMAkNBMQwwCgYDVQQGEwNVU0EwHhcNMTUwOTAyMTgyNjUzWhcNMTcwOTAyMTIwMDAwWjCBkDEoMCYGA1UEAwwfU2VsZlNpZ25lZENlcnRfMDJTZXAyMDE1XzE4MjY1MzEYMBYGA1UECwwPMDBEMjQwMDAwMDBwQW9BMRcwFQYDVQQKDA5TYWxlc2ZvcmNlLmNvbTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzELMAkGA1UECAwCQ0ExDDAKBgNVBAYTA1VTQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJp/wTRr9n1IWJpkRTjNpep47OKJrD2E6rGbJ18TG2RxtIz+zCn2JwH2aP3TULh0r0hhcg/pecv51RRcG7O19DBBaTQ5+KuoICQyKZy07/yDXSiZontTwkEYs06ssTwTHUcRXbcwTKv16L7omt0MjIhTTGfvtLOYiPwyvKvzAHg4eNuAcli0duVM78UIBORtdmy9C9ZcMh8yRJo5aPBq85wsE3JXU58ytyZzCHTBLH+2xFQrjYnUSEW+FOEEpI7o33MVdFBvWWg1R17HkWzcve4C30lqOHqvxBzyESZ/N1mMlmSt8gPFyB+mUXY99StJDJpnytbY8DwSzMQUo/sOVB0CAwEAAaOCAQAwgf0wHQYDVR0OBBYEFByu1EQqRQS0bYQBKS9K5qwKi+6IMA8GA1UdEwEB/wQFMAMBAf8wgcoGA1UdIwSBwjCBv4AUHK7URCpFBLRthAEpL0rmrAqL7oihgZakgZMwgZAxKDAmBgNVBAMMH1NlbGZTaWduZWRDZXJ0XzAyU2VwMjAxNV8xODI2NTMxGDAWBgNVBAsMDzAwRDI0MDAwMDAwcEFvQTEXMBUGA1UECgwOU2FsZXNmb3JjZS5jb20xFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xCzAJBgNVBAgMAkNBMQwwCgYDVQQGEwNVU0GCDgFPj0/EQQAAAAB8SVxHMA0GCSqGSIb3DQEBCwUAA4IBAQA9O5o1tC71qJnkq+ABPo4A1aFKZVT/07GcBX4/wetcbYySL4Q2nR9pMgfPYYS1j+P2E3viPsQwPIWDUBwFkNsjjX5DSGEkLAioVGKRwJshRSCSynMcsVZbQkfBUiZXqhM0wzvoa/ALvGD+aSSb1m+x7lEpDYNwQKWaUW2VYcHWv9wjujMyy7dlj8E/jqM71mw7ThNl6k4+3RQ802dMa14txm8pkF0vZgfpV3tkqhBqtjBAicVCaveqr3r3iGqjvyilBgdY+0NR8szqzm7CD/Bkb22+/IgM/mXQuL9KHD/WADlSGmYKmG3SSahmcZxznYCnzcRNN9LVuXlz5cbljmBj + + + + 
urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified + + + + From b06f0bbf4aa4d2e09b5715adf4fb6a455735a807 Mon Sep 17 00:00:00 2001 From: Joshua Semar Date: Thu, 3 Sep 2015 10:33:59 -0500 Subject: [PATCH 011/220] fix documentation --- .../aws/r/launch_configuration.html.markdown | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/website/source/docs/providers/aws/r/launch_configuration.html.markdown b/website/source/docs/providers/aws/r/launch_configuration.html.markdown index ea96503dc..85d45bcb0 100644 --- a/website/source/docs/providers/aws/r/launch_configuration.html.markdown +++ b/website/source/docs/providers/aws/r/launch_configuration.html.markdown @@ -23,10 +23,10 @@ resource "aws_launch_configuration" "as_conf" { ## Using with AutoScaling Groups Launch Configurations cannot be updated after creation with the Amazon -Web Service API. In order to update a Launch Configuration, Terraform will -destroy the existing resource and create a replacement. If order to effectively -use a Launch Configuration resource with an[AutoScaling Group resource][1], -it's recommend to omit the Launch Configuration `name` attribute, and +Web Service API. In order to update a Launch Configuration, Terraform will +destroy the existing resource and create a replacement. In order to effectively +use a Launch Configuration resource with an [AutoScaling Group resource][1], +it's recommended to omit the Launch Configuration `name` attribute, and specify `create_before_destroy` in a [lifecycle][2] block, as shown: ``` @@ -69,7 +69,12 @@ The following arguments are supported: * `user_data` - (Optional) The user data to provide when launching the instance. * `enable_monitoring` - (Optional) Enables/disables detailed monitoring. This is enabled by default. * `ebs_optimized` - (Optional) If true, the launched EC2 instance will be EBS-optimized. -* `block_device_mapping` - (Optional) A list of block devices to add. Their keys are documented below. +* `root_block_device` - (Optional) Customize details about the root block + device of the instance. See [Block Devices](#block-devices) below for details. +* `ebs_block_device` - (Optional) Additional EBS block devices to attach to the + instance. See [Block Devices](#block-devices) below for details. +* `ephemeral_block_device` - (Optional) Customize Ephemeral (also known as + "Instance Store") volumes on the instance. See [Block Devices](#block-devices) below for details. ## Block devices From 10c96afa9b3daade66d1911b1c8575f35c8aa786 Mon Sep 17 00:00:00 2001 From: Mike Fiedler Date: Tue, 8 Sep 2015 09:10:54 -0400 Subject: [PATCH 012/220] Update aws_db_instance `db_subnet_group_name` When launching a new RDS instance in a VPC-default AWS account, how to control which VPC the new RDS instance lands in is not apparent from the parameters available. The following works: ``` resource "aws_db_subnet_group" "foo" { name = "foo" description = "DB Subnet for foo" subnet_ids = ["${aws_subnet.foo_1a.id}", "${aws_subnet.foo_1b.id}"] } resource "aws_db_instance" "bar" { ... db_subnet_group_name = "${aws_db_subnet_group.foo.name}" ...
} ``` Hopefully this doc update will help others --- website/source/docs/providers/aws/r/db_instance.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/providers/aws/r/db_instance.html.markdown b/website/source/docs/providers/aws/r/db_instance.html.markdown index c2f0063f4..adf2dafe6 100644 --- a/website/source/docs/providers/aws/r/db_instance.html.markdown +++ b/website/source/docs/providers/aws/r/db_instance.html.markdown @@ -65,7 +65,7 @@ The following arguments are supported: * `vpc_security_group_ids` - (Optional) List of VPC security groups to associate. * `security_group_names` - (Optional/Deprecated) List of DB Security Groups to associate. Only used for [DB Instances on the _EC2-Classic_ Platform](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html#USER_VPC.FindDefaultVPC). -* `db_subnet_group_name` - (Optional) Name of DB subnet group +* `db_subnet_group_name` - (Optional) Name of DB subnet group. DB instance will be created in the VPC associated with the DB subnet group. If unspecified, will be created in the `default` VPC, or in EC2 Classic, if available. * `parameter_group_name` - (Optional) Name of the DB parameter group to associate. * `storage_encrypted` - (Optional) Specifies whether the DB instance is encrypted. The default is `false` if not specified. * `apply_immediately` - (Optional) Specifies whether any database modifications From 506aae2f285ea216ec6f93a5b7b441b8b091981b Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Tue, 8 Sep 2015 13:15:30 -0500 Subject: [PATCH 013/220] provider/aws: configurable capacity waiting duration move wait for capacity timeout from a constant to a configurable --- .../aws/resource_aws_autoscaling_group.go | 36 ++++++++++++++++--- .../aws/r/autoscaling_group.html.markdown | 23 ++++++++---- 2 files changed, 48 insertions(+), 11 deletions(-) diff --git a/builtin/providers/aws/resource_aws_autoscaling_group.go b/builtin/providers/aws/resource_aws_autoscaling_group.go index 771bda2e3..b96d6885a 100644 --- a/builtin/providers/aws/resource_aws_autoscaling_group.go +++ b/builtin/providers/aws/resource_aws_autoscaling_group.go @@ -120,6 +120,25 @@ func resourceAwsAutoscalingGroup() *schema.Resource { Set: schema.HashString, }, + "wait_for_capacity_timeout": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "10m", + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + duration, err := time.ParseDuration(value) + if err != nil { + errors = append(errors, fmt.Errorf( + "%q cannot be parsed as a duration: %s", k, err)) + } + if duration < 0 { + errors = append(errors, fmt.Errorf( + "%q must be greater than zero", k)) + } + return + }, + }, + "tag": autoscalingTagsSchema(), }, } @@ -445,8 +464,6 @@ func resourceAwsAutoscalingGroupDrain(d *schema.ResourceData, meta interface{}) }) } -var waitForASGCapacityTimeout = 10 * time.Minute - // Waits for a minimum number of healthy instances to show up as healthy in the // ASG before continuing. Waits up to `waitForASGCapacityTimeout` for // "desired_capacity", or "min_size" if desired capacity is not specified. 
@@ -461,9 +478,20 @@ func waitForASGCapacity(d *schema.ResourceData, meta interface{}) error { } wantELB := d.Get("min_elb_capacity").(int) - log.Printf("[DEBUG] Waiting for capacity: %d ASG, %d ELB", wantASG, wantELB) + wait, err := time.ParseDuration(d.Get("wait_for_capacity_timeout").(string)) + if err != nil { + return err + } - return resource.Retry(waitForASGCapacityTimeout, func() error { + if wait == 0 { + log.Printf("[DEBUG] Capacity timeout set to 0, skipping capacity waiting.") + return nil + } + + log.Printf("[DEBUG] Waiting %s for capacity: %d ASG, %d ELB", + wait, wantASG, wantELB) + + return resource.Retry(wait, func() error { g, err := getAwsAutoscalingGroup(d, meta) if err != nil { return resource.RetryError{Err: err} diff --git a/website/source/docs/providers/aws/r/autoscaling_group.html.markdown b/website/source/docs/providers/aws/r/autoscaling_group.html.markdown index 022b1cf71..caf272c94 100644 --- a/website/source/docs/providers/aws/r/autoscaling_group.html.markdown +++ b/website/source/docs/providers/aws/r/autoscaling_group.html.markdown @@ -63,6 +63,11 @@ The following arguments are supported: * `vpc_zone_identifier` (Optional) A list of subnet IDs to launch resources in. * `termination_policies` (Optional) A list of policies to decide how the instances in the auto scale group should be terminated. * `tag` (Optional) A list of tag blocks. Tags documented below. +* `wait_for_capacity_timeout` (Default: "10m") A maximum + [duration](https://golang.org/pkg/time/#ParseDuration) that Terraform should + wait for ASG instances to be healthy before timing out. (See also [Waiting + for Capacity](#waiting-for-capacity) below.) Setting this to "0" causes + Terraform to skip all Capacity Waiting behavior. Tags support the following: @@ -110,9 +115,12 @@ Terraform considers an instance "healthy" when the ASG reports `HealthStatus: Docs](https://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingGroupLifecycle.html) for more information on an ASG's lifecycle. -Terraform will wait for healthy instances for up to 10 minutes. If ASG creation -is taking more than a few minutes, it's worth investigating for scaling activity -errors, which can be caused by problems with the selected Launch Configuration. +Terraform will wait for healthy instances for up to +`wait_for_capacity_timeout`. If ASG creation is taking more than a few minutes, +it's worth investigating for scaling activity errors, which can be caused by +problems with the selected Launch Configuration. + +Setting `wait_for_capacity_timeout` to `"0"` disables ASG Capacity waiting. #### Waiting for ELB Capacity @@ -121,8 +129,9 @@ Balancers. If `min_elb_capacity` is set, Terraform will wait for that number of Instances to be `"InService"` in all attached `load_balancers`. This can be used to ensure that service is being provided before Terraform moves on. -As with ASG Capacity, Terraform will wait for up to 10 minutes for -`"InService"` instances. If ASG creation takes more than a few minutes, this -could indicate one of a number of configuration problems. See the [AWS Docs on -Load Balancer Troubleshooting](https://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-troubleshooting.html) +As with ASG Capacity, Terraform will wait for up to `wait_for_capacity_timeout` +for `"InService"` instances. If ASG creation takes more than a few minutes, +this could indicate one of a number of configuration problems. See the [AWS +Docs on Load Balancer +Troubleshooting](https://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-troubleshooting.html) for more information.
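For illustration, the new argument is set like any other on the group. This sketch is not part of the patch; the group name, sizes, zone, and launch configuration reference are invented:

```
resource "aws_autoscaling_group" "example" {
  availability_zones   = ["us-west-2a"]
  name                 = "example-asg"
  min_size             = 2
  max_size             = 4
  launch_configuration = "${aws_launch_configuration.as_conf.name}"

  # Wait up to 15 minutes for instances to report healthy;
  # setting this to "0" skips capacity waiting entirely.
  wait_for_capacity_timeout = "15m"
}
```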
From 03f94d66aef7fde266ad8e2f831c373ea37b99ad Mon Sep 17 00:00:00 2001 From: zpatrick Date: Wed, 9 Sep 2015 21:13:36 +0000 Subject: [PATCH 014/220] adding content field to s3_bucket_object --- .../aws/resource_aws_s3_bucket_object.go | 31 ++++++++++++--- .../aws/resource_aws_s3_bucket_object_test.go | 39 +++++++++++++++++-- 2 files changed, 61 insertions(+), 9 deletions(-) diff --git a/builtin/providers/aws/resource_aws_s3_bucket_object.go b/builtin/providers/aws/resource_aws_s3_bucket_object.go index 9d46952d0..8a2e8370b 100644 --- a/builtin/providers/aws/resource_aws_s3_bucket_object.go +++ b/builtin/providers/aws/resource_aws_s3_bucket_object.go @@ -4,6 +4,8 @@ import ( "fmt" "log" "os" + "io" + "bytes" "github.com/hashicorp/terraform/helper/schema" @@ -34,10 +36,18 @@ func resourceAwsS3BucketObject() *schema.Resource { "source": &schema.Schema{ Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: true, + ConflictsWith: []string{"content"}, }, + "content": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"source"}, + }, + "etag": &schema.Schema{ Type: schema.TypeString, Computed: true, @@ -51,19 +61,28 @@ func resourceAwsS3BucketObjectPut(d *schema.ResourceData, meta interface{}) erro bucket := d.Get("bucket").(string) key := d.Get("key").(string) - source := d.Get("source").(string) + var body io.ReadSeeker - file, err := os.Open(source) + if v, ok := d.GetOk("source"); ok { + source := v.(string) + file, err := os.Open(source) + if err != nil { + return fmt.Errorf("Error opening S3 bucket object source (%s): %s", source, err) + } - if err != nil { - return fmt.Errorf("Error opening S3 bucket object source (%s): %s", source, err) + body = file + } else if v, ok := d.GetOk("content"); ok { + content := v.(string) + body = bytes.NewReader([]byte(content)) + } else { + return fmt.Errorf("Must specify \"source\" or \"content\" field") + } resp, err := s3conn.PutObject( &s3.PutObjectInput{ Bucket: aws.String(bucket), Key: aws.String(key), - Body: file, + Body: body, }) if err != nil { diff --git a/builtin/providers/aws/resource_aws_s3_bucket_object_test.go b/builtin/providers/aws/resource_aws_s3_bucket_object_test.go index 4f947736a..6311dd7c3 100644 --- a/builtin/providers/aws/resource_aws_s3_bucket_object_test.go +++ b/builtin/providers/aws/resource_aws_s3_bucket_object_test.go @@ -15,7 +15,7 @@ import ( var tf, err = ioutil.TempFile("", "tf") -func TestAccAWSS3BucketObject_basic(t *testing.T) { +func TestAccAWSS3BucketObject_source(t *testing.T) { // first write some data to the tempfile just so it's not 0 bytes.
ioutil.WriteFile(tf.Name(), []byte("{anything will do }"), 0644) resource.Test(t, resource.TestCase{ @@ -29,7 +29,26 @@ func TestAccAWSS3BucketObject_basic(t *testing.T) { CheckDestroy: testAccCheckAWSS3BucketObjectDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccAWSS3BucketObjectConfig, + Config: testAccAWSS3BucketObjectConfigSource, + Check: testAccCheckAWSS3BucketObjectExists("aws_s3_bucket_object.object"), + }, + }, + }) +} + +func TestAccAWSS3BucketObject_content(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { + if err != nil { + panic(err) + } + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3BucketObjectDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSS3BucketObjectConfigContent, Check: testAccCheckAWSS3BucketObjectExists("aws_s3_bucket_object.object"), }, }, @@ -86,7 +105,7 @@ func testAccCheckAWSS3BucketObjectExists(n string) resource.TestCheckFunc { } var randomBucket = randInt -var testAccAWSS3BucketObjectConfig = fmt.Sprintf(` +var testAccAWSS3BucketObjectConfigSource = fmt.Sprintf(` resource "aws_s3_bucket" "object_bucket" { bucket = "tf-object-test-bucket-%d" } @@ -97,3 +116,17 @@ resource "aws_s3_bucket_object" "object" { source = "%s" } `, randomBucket, tf.Name()) + + +var testAccAWSS3BucketObjectConfigContent = fmt.Sprintf(` +resource "aws_s3_bucket" "object_bucket" { + bucket = "tf-object-test-bucket-%d" +} + +resource "aws_s3_bucket_object" "object" { + bucket = "${aws_s3_bucket.object_bucket.bucket}" + key = "test-key" + content = "some_bucket_content" +} +`, randomBucket) + From 141c419cc70827ecc97211889913f6cdd1b59cb3 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Wed, 9 Sep 2015 23:17:57 -0700 Subject: [PATCH 015/220] Docs for aws_s3_bucket content argument. --- .../docs/providers/aws/r/s3_bucket_object.html.markdown | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/website/source/docs/providers/aws/r/s3_bucket_object.html.markdown b/website/source/docs/providers/aws/r/s3_bucket_object.html.markdown index 63d201b82..14286a603 100644 --- a/website/source/docs/providers/aws/r/s3_bucket_object.html.markdown +++ b/website/source/docs/providers/aws/r/s3_bucket_object.html.markdown @@ -28,7 +28,11 @@ The following arguments are supported: * `bucket` - (Required) The name of the bucket to put the file in. * `key` - (Required) The name of the object once it is in the bucket. -* `source` - (Required) The path to the source file being uploaded to the bucket. +* `source` - (Required unless `content` given) The path to the source file being uploaded to the bucket. +* `content` - (Required unless `source` given) The literal content being uploaded to the bucket. + +Either `source` or `content` must be provided to specify the bucket content. +These two arguments are mutually-exclusive. 
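For illustration, a minimal configuration using the new `content` argument might look like the following sketch; the bucket name, key, and content are placeholders rather than values from this patch:

```
resource "aws_s3_bucket" "example" {
  bucket = "tf-content-example-bucket"
}

resource "aws_s3_bucket_object" "example" {
  bucket  = "${aws_s3_bucket.example.bucket}"
  key     = "config/app.properties"

  # Inline content; the object is recreated whenever this string changes.
  content = "environment = production"
}
```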
## Attributes Reference From 5256a6df6b7677d26b63efe1c5a932e2cce884b3 Mon Sep 17 00:00:00 2001 From: zpatrick Date: Thu, 10 Sep 2015 18:37:17 +0000 Subject: [PATCH 016/220] fix formatting --- .../aws/resource_aws_s3_bucket_object.go | 21 ++++++++++--------- .../aws/resource_aws_s3_bucket_object_test.go | 11 ++++------ 2 files changed, 15 insertions(+), 17 deletions(-) diff --git a/builtin/providers/aws/resource_aws_s3_bucket_object.go b/builtin/providers/aws/resource_aws_s3_bucket_object.go index 8a2e8370b..3a4cc4df2 100644 --- a/builtin/providers/aws/resource_aws_s3_bucket_object.go +++ b/builtin/providers/aws/resource_aws_s3_bucket_object.go @@ -1,11 +1,11 @@ package aws import ( + "bytes" "fmt" + "io" "log" "os" - "io" - "bytes" "github.com/hashicorp/terraform/helper/schema" @@ -35,18 +35,18 @@ func resourceAwsS3BucketObject() *schema.Resource { }, "source": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, ConflictsWith: []string{"content"}, }, - "content": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, + "content": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, ConflictsWith: []string{"source"}, - }, + }, "etag": &schema.Schema{ Type: schema.TypeString, @@ -138,3 +138,4 @@ func resourceAwsS3BucketObjectDelete(d *schema.ResourceData, meta interface{}) e } return nil } + diff --git a/builtin/providers/aws/resource_aws_s3_bucket_object_test.go b/builtin/providers/aws/resource_aws_s3_bucket_object_test.go index 6311dd7c3..0e0651ad0 100644 --- a/builtin/providers/aws/resource_aws_s3_bucket_object_test.go +++ b/builtin/providers/aws/resource_aws_s3_bucket_object_test.go @@ -107,22 +107,19 @@ func testAccCheckAWSS3BucketObjectExists(n string) resource.TestCheckFunc { var randomBucket = randInt var testAccAWSS3BucketObjectConfigSource = fmt.Sprintf(` resource "aws_s3_bucket" "object_bucket" { - bucket = "tf-object-test-bucket-%d" + bucket = "tf-object-test-bucket-%d" } - resource "aws_s3_bucket_object" "object" { - bucket = "${aws_s3_bucket.object_bucket.bucket}" - key = "test-key" - source = "%s" + bucket = "${aws_s3_bucket.object_bucket.bucket}" + key = "test-key" + source = "%s" } `, randomBucket, tf.Name()) - var testAccAWSS3BucketObjectConfigContent = fmt.Sprintf(` resource "aws_s3_bucket" "object_bucket" { bucket = "tf-object-test-bucket-%d" } - resource "aws_s3_bucket_object" "object" { bucket = "${aws_s3_bucket.object_bucket.bucket}" key = "test-key" From 863a7383aa5b50ab23d497d7d895d1da392539e8 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Thu, 10 Sep 2015 16:08:48 -0500 Subject: [PATCH 017/220] doc: module sources from private github repos --- .../source/docs/modules/sources.html.markdown | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/website/source/docs/modules/sources.html.markdown b/website/source/docs/modules/sources.html.markdown index b0a2b4d0c..d9e6a1316 100644 --- a/website/source/docs/modules/sources.html.markdown +++ b/website/source/docs/modules/sources.html.markdown @@ -81,6 +81,30 @@ You can use the same parameters to GitHub repositories as you can generic Git repositories (such as tags or branches). See the documentation for generic Git repositories for more information. 
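As a hedged illustration of those parameters, a module pinned to a Git tag might be sourced as below; the repository path, subdirectory, and tag are hypothetical:

```
module "consul" {
  # The "ref" parameter selects a tag or branch, and "//" addresses
  # a subdirectory within the repository.
  source = "github.com/hashicorp/example//modules/consul?ref=v0.1.0"
}
```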
+#### Private GitHub Repos + +If you need Terraform to be able to fetch modules from private GitHub repos on +a remote machine (like Atlas or a CI server), you'll need to provide +Terraform with credentials that can be used to authenticate as a user with read +access to the private repo. + +First, create a [machine +user](https://developer.github.com/guides/managing-deploy-keys/#machine-users) +with access to read from the private repo in question, then embed this user's +credentials into the source field: + +``` +module "private-infra" { + source = "git::https://MACHINE-USER:MACHINE-PASS@github.com/org/privatemodules//modules/foo" +} +``` + +Note that Terraform does not yet support interpolations in the `source` field, +so the machine username and password will have to be embedded directly into the +source string. You can track +[GH-1439](https://github.com/hashicorp/terraform/issues/1439) to learn when this +limitation is lifted. + ## BitBucket Terraform will automatically recognize BitBucket URLs and turn them into From 3d77d158f7270472446b9e1fe461487c9763c91c Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Mon, 14 Sep 2015 10:38:29 +0100 Subject: [PATCH 018/220] remote/s3: Add support for ACL --- state/remote/s3.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/state/remote/s3.go b/state/remote/s3.go index c2d897dd0..26330d112 100644 --- a/state/remote/s3.go +++ b/state/remote/s3.go @@ -4,6 +4,7 @@ import ( "bytes" "fmt" "io" + "log" "os" "strconv" @@ -45,6 +46,11 @@ func s3Factory(conf map[string]string) (Client, error) { serverSideEncryption = v } + acl := "" + if raw, ok := conf["acl"]; ok { + acl = raw + } + accessKeyId := conf["access_key"] secretAccessKey := conf["secret_key"] @@ -77,6 +83,7 @@ func s3Factory(conf map[string]string) (Client, error) { bucketName: bucketName, keyName: keyName, serverSideEncryption: serverSideEncryption, + acl: acl, }, nil } @@ -85,6 +92,7 @@ type S3Client struct { bucketName string keyName string serverSideEncryption bool + acl string } func (c *S3Client) Get() (*Payload, error) { @@ -140,6 +148,12 @@ func (c *S3Client) Put(data []byte) error { i.ServerSideEncryption = aws.String("AES256") } + if c.acl != "" { + i.ACL = aws.String(c.acl) + } + + log.Printf("[DEBUG] Uploading remote state to S3: %#v", i) + if _, err := c.nativeClient.PutObject(i); err == nil { return nil } else { From 4f7f20ba23b3b46680005a0efd4f06c80ad9a2b5 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Mon, 14 Sep 2015 10:36:55 +0100 Subject: [PATCH 019/220] remote/s3: Add some docs for supported parameters --- website/source/docs/commands/remote-config.html.markdown | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/website/source/docs/commands/remote-config.html.markdown b/website/source/docs/commands/remote-config.html.markdown index c7586ac0e..73a06f821 100644 --- a/website/source/docs/commands/remote-config.html.markdown +++ b/website/source/docs/commands/remote-config.html.markdown @@ -57,6 +57,13 @@ The following backends are supported: in the `access_key`, `secret_key` and `region` variables respectively, but passing credentials this way is not recommended since they will be included in cleartext inside the persisted state.
+ Other supported parameters include: + * `bucket` - the name of the S3 bucket + * `key` - path where to place/look for state file inside the bucket + * `encrypt` - whether to enable [server side encryption](http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) + of the state file + * `acl` - [Canned ACL](http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) + to be applied to the state file. * HTTP - Stores the state using a simple REST client. State will be fetched via GET, updated via POST, and purged with DELETE. Requires the `address` variable. From 55f3c8c76498cf181a1f3605b0e796e5cac6be07 Mon Sep 17 00:00:00 2001 From: thrashr888 Date: Mon, 14 Sep 2015 16:50:53 -0700 Subject: [PATCH 020/220] provider/aws: aws_elasticache_cluster normalizes name to lowercase --- .../aws/resource_aws_elasticache_cluster.go | 12 +++++++++++- .../aws/resource_aws_elasticache_cluster_test.go | 5 ++++- .../aws/r/elasticache_cluster.html.markdown | 4 ++-- 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster.go b/builtin/providers/aws/resource_aws_elasticache_cluster.go index 080c56ac9..520ea1342 100644 --- a/builtin/providers/aws/resource_aws_elasticache_cluster.go +++ b/builtin/providers/aws/resource_aws_elasticache_cluster.go @@ -28,6 +28,12 @@ func resourceAwsElasticacheCluster() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, + StateFunc: func(val interface{}) string { + // Elasticache normalizes cluster ids to lowercase, + // so we have to do this too or else we can end up + // with non-converging diffs. + return strings.ToLower(val.(string)) + }, }, "engine": &schema.Schema{ Type: schema.TypeString, @@ -190,7 +196,11 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{ return fmt.Errorf("Error creating Elasticache: %s", err) } - d.SetId(*resp.CacheCluster.CacheClusterId) + // Assign the cluster id as the resource ID + // Elasticache always retains the id in lower case, so we have to + // mimic that or else we won't be able to refresh a resource whose + // name contained uppercase characters. + d.SetId(strings.ToLower(*resp.CacheCluster.CacheClusterId)) pending := []string{"creating"} stateConf := &resource.StateChangeConf{ diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go index caa14a8df..173ca21ea 100644 --- a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go +++ b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go @@ -163,7 +163,10 @@ resource "aws_security_group" "bar" { } resource "aws_elasticache_cluster" "bar" { - cluster_id = "tf-test-%03d" + // Including uppercase letters in this name to ensure + // that we correctly handle the fact that the API + // normalizes names to lowercase. + cluster_id = "tf-TEST-%03d" node_type = "cache.m1.small" num_cache_nodes = 1 engine = "redis" diff --git a/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown b/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown index 953b78a9c..dc4df4c2a 100644 --- a/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown +++ b/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown @@ -27,8 +27,8 @@ resource "aws_elasticache_cluster" "bar" { The following arguments are supported: -* `cluster_id` – (Required) Group identifier. 
This parameter is stored as a -lowercase string +* `cluster_id` – (Required) Group identifier. Elasticache converts + this name to lowercase * `engine` – (Required) Name of the cache engine to be used for this cache cluster. Valid values for this parameter are `memcached` or `redis` From b224abb7a9b09247c3913c28d68870a6471efe86 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 16 Sep 2015 22:02:28 +0100 Subject: [PATCH 021/220] provider/aws: Add cloudwatch_log_group --- builtin/providers/aws/config.go | 37 +++-- builtin/providers/aws/provider.go | 1 + .../aws/resource_aws_cloudwatch_log_group.go | 146 ++++++++++++++++++ 3 files changed, 168 insertions(+), 16 deletions(-) create mode 100644 builtin/providers/aws/resource_aws_cloudwatch_log_group.go diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go index a57c65c1b..c1fc7ca92 100644 --- a/builtin/providers/aws/config.go +++ b/builtin/providers/aws/config.go @@ -12,6 +12,7 @@ import ( "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/service/autoscaling" "github.com/aws/aws-sdk-go/service/cloudwatch" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "github.com/aws/aws-sdk-go/service/dynamodb" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ecs" @@ -41,22 +42,23 @@ type Config struct { } type AWSClient struct { - cloudwatchconn *cloudwatch.CloudWatch - dynamodbconn *dynamodb.DynamoDB - ec2conn *ec2.EC2 - ecsconn *ecs.ECS - elbconn *elb.ELB - autoscalingconn *autoscaling.AutoScaling - s3conn *s3.S3 - sqsconn *sqs.SQS - snsconn *sns.SNS - r53conn *route53.Route53 - region string - rdsconn *rds.RDS - iamconn *iam.IAM - kinesisconn *kinesis.Kinesis - elasticacheconn *elasticache.ElastiCache - lambdaconn *lambda.Lambda + cloudwatchconn *cloudwatch.CloudWatch + cloudwatchlogsconn *cloudwatchlogs.CloudWatchLogs + dynamodbconn *dynamodb.DynamoDB + ec2conn *ec2.EC2 + ecsconn *ecs.ECS + elbconn *elb.ELB + autoscalingconn *autoscaling.AutoScaling + s3conn *s3.S3 + sqsconn *sqs.SQS + snsconn *sns.SNS + r53conn *route53.Route53 + region string + rdsconn *rds.RDS + iamconn *iam.IAM + kinesisconn *kinesis.Kinesis + elasticacheconn *elasticache.ElastiCache + lambdaconn *lambda.Lambda } // Client configures and returns a fully initialized AWSClient @@ -156,6 +158,9 @@ func (c *Config) Client() (interface{}, error) { log.Println("[INFO] Initializing CloudWatch SDK connection") client.cloudwatchconn = cloudwatch.New(awsConfig) + + log.Println("[INFO] Initializing CloudWatch Logs connection") + client.cloudwatchlogsconn = cloudwatchlogs.New(awsConfig) } if len(errs) > 0 { diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index 6b2c16c7a..16e4f3789 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -163,6 +163,7 @@ func Provider() terraform.ResourceProvider { "aws_autoscaling_group": resourceAwsAutoscalingGroup(), "aws_autoscaling_notification": resourceAwsAutoscalingNotification(), "aws_autoscaling_policy": resourceAwsAutoscalingPolicy(), + "aws_cloudwatch_log_group": resourceAwsCloudWatchLogGroup(), "aws_cloudwatch_metric_alarm": resourceAwsCloudWatchMetricAlarm(), "aws_customer_gateway": resourceAwsCustomerGateway(), "aws_db_instance": resourceAwsDbInstance(), diff --git a/builtin/providers/aws/resource_aws_cloudwatch_log_group.go b/builtin/providers/aws/resource_aws_cloudwatch_log_group.go new file mode 100644 index 000000000..e4f7236b2 --- /dev/null +++ 
b/builtin/providers/aws/resource_aws_cloudwatch_log_group.go @@ -0,0 +1,148 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" +) + +func resourceAwsCloudWatchLogGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsCloudWatchLogGroupCreate, + Read: resourceAwsCloudWatchLogGroupRead, + Update: resourceAwsCloudWatchLogGroupUpdate, + Delete: resourceAwsCloudWatchLogGroupDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "retention_in_days": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 0, + }, + + "arn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsCloudWatchLogGroupCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatchlogsconn + + log.Printf("[DEBUG] Creating CloudWatch Log Group: %s", d.Get("name").(string)) + _, err := conn.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ + LogGroupName: aws.String(d.Get("name").(string)), + }) + if err != nil { + return fmt.Errorf("Creating CloudWatch Log Group failed: %s", err) + } + + d.SetId(d.Get("name").(string)) + + log.Println("[INFO] CloudWatch Log Group created") + + return resourceAwsCloudWatchLogGroupUpdate(d, meta) +} + +func resourceAwsCloudWatchLogGroupRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatchlogsconn + log.Printf("[DEBUG] Reading CloudWatch Log Group: %q", d.Get("name").(string)) + lg, err := lookupCloudWatchLogGroup(conn, d.Get("name").(string), nil) + if err != nil { + return err + } + + log.Printf("[DEBUG] Found Log Group: %#v", *lg) + + d.Set("arn", *lg.Arn) + d.Set("name", *lg.LogGroupName) + + if lg.RetentionInDays != nil { + d.Set("retention_in_days", *lg.RetentionInDays) + } + + return nil +} + +func lookupCloudWatchLogGroup(conn *cloudwatchlogs.CloudWatchLogs, + name string, nextToken *string) (*cloudwatchlogs.LogGroup, error) { + input := &cloudwatchlogs.DescribeLogGroupsInput{ + LogGroupNamePrefix: aws.String(name), + NextToken: nextToken, + } + resp, err := conn.DescribeLogGroups(input) + if err != nil { + return nil, err + } + + for _, lg := range resp.LogGroups { + if *lg.LogGroupName == name { + return lg, nil + } + } + + if resp.NextToken != nil { + return lookupCloudWatchLogGroup(conn, name, resp.NextToken) + } + + return nil, fmt.Errorf("CloudWatch Log Group %q not found", name) +} + +func resourceAwsCloudWatchLogGroupUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatchlogsconn + + name := d.Get("name").(string) + log.Printf("[DEBUG] Updating CloudWatch Log Group: %q", name) + + if d.HasChange("retention_in_days") { + var err error + + if v, ok := d.GetOk("retention_in_days"); ok { + input := cloudwatchlogs.PutRetentionPolicyInput{ + LogGroupName: aws.String(name), + RetentionInDays: aws.Int64(int64(v.(int))), + } + log.Printf("[DEBUG] Setting retention for CloudWatch Log Group: %q: %s", name, input) + _, err = conn.PutRetentionPolicy(&input) + } else { + log.Printf("[DEBUG] Deleting retention for CloudWatch Log Group: %q", name) + _, err = conn.DeleteRetentionPolicy(&cloudwatchlogs.DeleteRetentionPolicyInput{ + LogGroupName: aws.String(name), + }) + } + + if err != nil { + return err + } + } + + return resourceAwsCloudWatchLogGroupRead(d, meta) +} + +func
resourceAwsCloudWatchLogGroupDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatchlogsconn + log.Printf("[INFO] Deleting CloudWatch Log Group: %s", d.Id()) + _, err := conn.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{ + LogGroupName: aws.String(d.Get("name").(string)), + }) + if err != nil { + return fmt.Errorf("Error deleting CloudWatch Log Group: %s", err) + } + log.Println("[INFO] CloudWatch Log Group deleted") + + d.SetId("") + + return nil +} From 7b0626adb6fa59fbefbc648c8456e4a8796fe274 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 16 Sep 2015 22:02:45 +0100 Subject: [PATCH 022/220] provider/aws: Add docs for CloudWatch Log Group --- .../aws/r/cloudwatch_log_group.html.markdown | 33 +++++++++++++++++++ website/source/layouts/aws.erb | 4 +++ 2 files changed, 37 insertions(+) create mode 100644 website/source/docs/providers/aws/r/cloudwatch_log_group.html.markdown diff --git a/website/source/docs/providers/aws/r/cloudwatch_log_group.html.markdown b/website/source/docs/providers/aws/r/cloudwatch_log_group.html.markdown new file mode 100644 index 000000000..e784c6389 --- /dev/null +++ b/website/source/docs/providers/aws/r/cloudwatch_log_group.html.markdown @@ -0,0 +1,33 @@ +--- +layout: "aws" +page_title: "AWS: aws_cloudwatch_log_group" +sidebar_current: "docs-aws-resource-cloudwatch-log-group" +description: |- + Provides a CloudWatch Log Group resource. +--- + +# aws\_cloudwatch\_log\_group + +Provides a CloudWatch Log Group resource. + +## Example Usage + +``` +resource "aws_cloudwatch_log_group" "yada" { + name = "Yada" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of the log group +* `retention_in_days` - (Optional) Specifies the number of days + you want to retain log events in the specified log group. + +## Attributes Reference + +The following attributes are exported: + +* `arn` - The Amazon Resource Name (ARN) specifying the log group. diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb index 22801a507..5c67ad58e 100644 --- a/website/source/layouts/aws.erb +++ b/website/source/layouts/aws.erb @@ -15,6 +15,10 @@ CloudWatch Resources From 4e3179cf31936e2899085b99d52bb71251c96e2e Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Tue, 6 Oct 2015 14:53:06 -0400 Subject: [PATCH 096/220] Update CHANGELOG.md --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4e65fcfe0..3fd01bc0f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ FEATURES: * **New resources: `aws_cloudwatch_log_group`** [GH-2415] * **New resource: `google_storage_bucket_object`** [GH-3192] * **New resources: `google_compute_vpn_gateway`, `google_compute_vpn_tunnel`** [GH-3213] + * **New resources: `google_storage_bucket_acl`, `google_storage_object_acl`** [GH-3272] * **New resource: `aws_iam_saml_provider`** [GH-3156] * **New resources: `aws_efs_file_system` and `aws_efs_mount_target`** [GH-2196] @@ -29,6 +30,7 @@ IMPROVEMENTS: * provider/aws: Add `configuation_endpoint` to `aws_elasticache_cluster` [GH-3250] * provider/cloudstack: Add `project` parameter to `cloudstack_vpc`, `cloudstack_network`, `cloudstack_ipaddress` and `cloudstack_disk` [GH-3035] * provider/openstack: add functionality to attach FloatingIP to Port [GH-1788] + * provider/google: Can now do multi-region deployments without using multiple providers [GH-3258] * remote/s3: Allow canned ACLs to be set on state objects. 
[GH-3233] BUG FIXES: From 82946d8eb17192be5c48e81ee35b76b2c6ca4966 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Tue, 6 Oct 2015 14:58:21 -0500 Subject: [PATCH 097/220] Update CHANGELOG.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3fd01bc0f..d918241a6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,7 @@ FEATURES: * **New provider: `rundeck`** [GH-2412] * **New resource: `cloudstack_loadbalancer_rule`** [GH-2934] * **New resource: `google_compute_project_metadata`** [GH-3065] - * **New resources: `aws_ami`, `aws_ami_copy`, `aws_ami_from_instance`** [GH-2874] + * **New resources: `aws_ami`, `aws_ami_copy`, `aws_ami_from_instance`** [GH-2784] * **New resources: `aws_cloudwatch_log_group`** [GH-2415] * **New resource: `google_storage_bucket_object`** [GH-3192] * **New resources: `google_compute_vpn_gateway`, `google_compute_vpn_tunnel`** [GH-3213] From e635d40bd2cfa304283e7dae0afcff2a04955a90 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Tue, 6 Oct 2015 15:25:41 -0500 Subject: [PATCH 098/220] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d918241a6..7019444c0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ FEATURES: * **New resources: `google_storage_bucket_acl`, `google_storage_object_acl`** [GH-3272] * **New resource: `aws_iam_saml_provider`** [GH-3156] * **New resources: `aws_efs_file_system` and `aws_efs_mount_target`** [GH-2196] + * **New resources: `aws_opsworks_*`** [GH-2162] IMPROVEMENTS: From 5739c4869ca586e4ebf878e2687ef81ce488c327 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Tue, 4 Aug 2015 16:24:55 -0500 Subject: [PATCH 099/220] provider/aws: Docs for RDS Cluster, Cluster Instance --- builtin/providers/aws/provider.go | 2 + .../providers/aws/resource_aws_db_instance.go | 28 +- .../providers/aws/resource_aws_rds_cluster.go | 319 ++++++++++++++++++ .../aws/resource_aws_rds_cluster_instance.go | 210 ++++++++++++ .../resource_aws_rds_cluster_instance_test.go | 127 +++++++ .../aws/resource_aws_rds_cluster_test.go | 102 ++++++ builtin/providers/aws/structure.go | 22 ++ builtin/providers/aws/tagsRDS.go | 22 +- website/Gemfile.lock | 3 - .../providers/aws/r/rds_cluster.html.markdown | 85 +++++ .../aws/r/rds_cluster_instance.html.markdown | 87 +++++ website/source/layouts/aws.erb | 11 +- 12 files changed, 987 insertions(+), 31 deletions(-) create mode 100644 builtin/providers/aws/resource_aws_rds_cluster.go create mode 100644 builtin/providers/aws/resource_aws_rds_cluster_instance.go create mode 100644 builtin/providers/aws/resource_aws_rds_cluster_instance_test.go create mode 100644 builtin/providers/aws/resource_aws_rds_cluster_test.go create mode 100644 website/source/docs/providers/aws/r/rds_cluster.html.markdown create mode 100644 website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index 3b5aa67ad..cf89db0b7 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -219,6 +219,8 @@ func Provider() terraform.ResourceProvider { "aws_opsworks_ganglia_layer": resourceAwsOpsworksGangliaLayer(), "aws_opsworks_custom_layer": resourceAwsOpsworksCustomLayer(), "aws_proxy_protocol_policy": resourceAwsProxyProtocolPolicy(), + "aws_rds_cluster": resourceAwsRDSCluster(), + "aws_rds_cluster_instance": resourceAwsRDSClusterInstance(), "aws_route53_delegation_set": 
resourceAwsRoute53DelegationSet(), "aws_route53_record": resourceAwsRoute53Record(), "aws_route53_zone_association": resourceAwsRoute53ZoneAssociation(), diff --git a/builtin/providers/aws/resource_aws_db_instance.go b/builtin/providers/aws/resource_aws_db_instance.go index afe763500..60b3dd329 100644 --- a/builtin/providers/aws/resource_aws_db_instance.go +++ b/builtin/providers/aws/resource_aws_db_instance.go @@ -75,29 +75,10 @@ func resourceAwsDbInstance() *schema.Resource { }, "identifier": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only lowercase alphanumeric characters and hyphens allowed in %q", k)) - } - if !regexp.MustCompile(`^[a-z]`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "first character of %q must be a letter", k)) - } - if regexp.MustCompile(`--`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q cannot contain two consecutive hyphens", k)) - } - if regexp.MustCompile(`-$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q cannot end with a hyphen", k)) - } - return - }, + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateRdsId, }, "instance_class": &schema.Schema{ @@ -524,7 +505,6 @@ func resourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error { if v.DBName != nil && *v.DBName != "" { name = *v.DBName } - log.Printf("[DEBUG] Error building ARN for DB Instance, not setting Tags for DB %s", name) } else { resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{ diff --git a/builtin/providers/aws/resource_aws_rds_cluster.go b/builtin/providers/aws/resource_aws_rds_cluster.go new file mode 100644 index 000000000..0e3d9339a --- /dev/null +++ b/builtin/providers/aws/resource_aws_rds_cluster.go @@ -0,0 +1,319 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/rds" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsRDSCluster() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsRDSClusterCreate, + Read: resourceAwsRDSClusterRead, + Update: resourceAwsRDSClusterUpdate, + Delete: resourceAwsRDSClusterDelete, + + Schema: map[string]*schema.Schema{ + + "availability_zones": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + ForceNew: true, + Computed: true, + Set: schema.HashString, + }, + + "cluster_identifier": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateRdsId, + }, + + "cluster_members": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + Computed: true, + Set: schema.HashString, + }, + + "database_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "db_subnet_group_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "endpoint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "engine": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "master_username": &schema.Schema{ + Type: schema.TypeString, + 
Required: true, + ForceNew: true, + }, + + "master_password": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "port": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + + // apply_immediately is used to determine when the update modifications + // take place. + // See http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html + "apply_immediately": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + + "vpc_security_group_ids": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + } +} + +func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).rdsconn + + createOpts := &rds.CreateDBClusterInput{ + DBClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)), + Engine: aws.String("aurora"), + MasterUserPassword: aws.String(d.Get("master_password").(string)), + MasterUsername: aws.String(d.Get("master_username").(string)), + } + + if v := d.Get("database_name"); v.(string) != "" { + createOpts.DatabaseName = aws.String(v.(string)) + } + + if attr, ok := d.GetOk("port"); ok { + createOpts.Port = aws.Int64(int64(attr.(int))) + } + + if attr, ok := d.GetOk("db_subnet_group_name"); ok { + createOpts.DBSubnetGroupName = aws.String(attr.(string)) + } + + if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { + createOpts.VpcSecurityGroupIds = expandStringList(attr.List()) + } + + if attr := d.Get("availability_zones").(*schema.Set); attr.Len() > 0 { + createOpts.AvailabilityZones = expandStringList(attr.List()) + } + + log.Printf("[DEBUG] RDS Cluster create options: %s", createOpts) + resp, err := conn.CreateDBCluster(createOpts) + if err != nil { + log.Printf("[ERROR] Error creating RDS Cluster: %s", err) + return err + } + + log.Printf("[DEBUG]: Cluster create response: %s", resp) + d.SetId(*resp.DBCluster.DBClusterIdentifier) + stateConf := &resource.StateChangeConf{ + Pending: []string{"creating", "backing-up", "modifying"}, + Target: "available", + Refresh: resourceAwsRDSClusterStateRefreshFunc(d, meta), + Timeout: 5 * time.Minute, + MinTimeout: 3 * time.Second, + } + + // Wait, catching any errors + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("[WARN] Error waiting for RDS Cluster state to be \"available\": %s", err) + } + + return resourceAwsRDSClusterRead(d, meta) +} + +func resourceAwsRDSClusterRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).rdsconn + + resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{ + DBClusterIdentifier: aws.String(d.Id()), + // final snapshot identifier + }) + + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if "DBClusterNotFoundFault" == awsErr.Code() { + d.SetId("") + log.Printf("[DEBUG] RDS Cluster (%s) not found", d.Id()) + return nil + } + } + log.Printf("[DEBUG] Error describing RDS Cluster (%s)", d.Id()) + return err + } + + var dbc *rds.DBCluster + for _, c := range resp.DBClusters { + if *c.DBClusterIdentifier == d.Id() { + dbc = c + } + } + + if dbc == nil { + log.Printf("[WARN] RDS Cluster (%s) not found", d.Id()) + d.SetId("") + return nil + } + + if err := d.Set("availability_zones", aws.StringValueSlice(dbc.AvailabilityZones)); err != nil { + return fmt.Errorf("[DEBUG] Error saving AvailabilityZones to state for RDS Cluster (%s): %s", d.Id(), err) + } + 
d.Set("database_name", dbc.DatabaseName) + d.Set("db_subnet_group_name", dbc.DBSubnetGroup) + d.Set("endpoint", dbc.Endpoint) + d.Set("engine", dbc.Engine) + d.Set("master_username", dbc.MasterUsername) + d.Set("port", dbc.Port) + + var vpcg []string + for _, g := range dbc.VpcSecurityGroups { + vpcg = append(vpcg, *g.VpcSecurityGroupId) + } + if err := d.Set("vpc_security_group_ids", vpcg); err != nil { + return fmt.Errorf("[DEBUG] Error saving VPC Security Group IDs to state for RDS Cluster (%s): %s", d.Id(), err) + } + + var cm []string + for _, m := range dbc.DBClusterMembers { + cm = append(cm, *m.DBInstanceIdentifier) + } + if err := d.Set("cluster_members", cm); err != nil { + return fmt.Errorf("[DEBUG] Error saving RDS Cluster Members to state for RDS Cluster (%s): %s", d.Id(), err) + } + + return nil +} + +func resourceAwsRDSClusterUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).rdsconn + + req := &rds.ModifyDBClusterInput{ + ApplyImmediately: aws.Bool(d.Get("apply_immediately").(bool)), + DBClusterIdentifier: aws.String(d.Id()), + } + + if d.HasChange("master_password") { + req.MasterUserPassword = aws.String(d.Get("master_password").(string)) + } + + if d.HasChange("vpc_security_group_ids") { + if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { + req.VpcSecurityGroupIds = expandStringList(attr.List()) + } else { + req.VpcSecurityGroupIds = []*string{} + } + } + + _, err := conn.ModifyDBCluster(req) + if err != nil { + return fmt.Errorf("[WARN] Error modifying RDS Cluster (%s): %s", d.Id(), err) + } + + return resourceAwsRDSClusterRead(d, meta) +} + +func resourceAwsRDSClusterDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).rdsconn + log.Printf("[DEBUG] Destroying RDS Cluster (%s)", d.Id()) + + _, err := conn.DeleteDBCluster(&rds.DeleteDBClusterInput{ + DBClusterIdentifier: aws.String(d.Id()), + SkipFinalSnapshot: aws.Bool(true), + // final snapshot identifier + }) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"deleting", "backing-up", "modifying"}, + Target: "destroyed", + Refresh: resourceAwsRDSClusterStateRefreshFunc(d, meta), + Timeout: 5 * time.Minute, + MinTimeout: 3 * time.Second, + } + + // Wait, catching any errors + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("[WARN] Error deleting RDS Cluster (%s): %s", d.Id(), err) + } + + return nil +} + +func resourceAwsRDSClusterStateRefreshFunc( + d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + conn := meta.(*AWSClient).rdsconn + + resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{ + DBClusterIdentifier: aws.String(d.Id()), + }) + + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if "DBClusterNotFoundFault" == awsErr.Code() { + return 42, "destroyed", nil + } + } + log.Printf("[WARN] Error on retrieving DB Cluster (%s) when waiting: %s", d.Id(), err) + return nil, "", err + } + + var dbc *rds.DBCluster + + for _, c := range resp.DBClusters { + if *c.DBClusterIdentifier == d.Id() { + dbc = c + } + } + + if dbc == nil { + return 42, "destroyed", nil + } + + if dbc.Status != nil { + log.Printf("[DEBUG] DB Cluster status (%s): %s", d.Id(), *dbc.Status) + } + + return dbc, *dbc.Status, nil + } +} diff --git a/builtin/providers/aws/resource_aws_rds_cluster_instance.go b/builtin/providers/aws/resource_aws_rds_cluster_instance.go new file mode 100644 index 000000000..27a82b897 --- /dev/null +++ 
b/builtin/providers/aws/resource_aws_rds_cluster_instance.go @@ -0,0 +1,210 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/rds" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsRDSClusterInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsRDSClusterInstanceCreate, + Read: resourceAwsRDSClusterInstanceRead, + Update: resourceAwsRDSClusterInstanceUpdate, + Delete: resourceAwsRDSClusterInstanceDelete, + + Schema: map[string]*schema.Schema{ + "identifier": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validateRdsId, + }, + + "db_subnet_group_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "writer": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + }, + + "cluster_identifier": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "endpoint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "port": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + }, + + "instance_class": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsRDSClusterInstanceCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).rdsconn + tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{})) + + createOpts := &rds.CreateDBInstanceInput{ + DBInstanceClass: aws.String(d.Get("instance_class").(string)), + DBClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)), + Engine: aws.String("aurora"), + Tags: tags, + } + + if v := d.Get("identifier").(string); v != "" { + createOpts.DBInstanceIdentifier = aws.String(v) + } else { + createOpts.DBInstanceIdentifier = aws.String(resource.UniqueId()) + } + + if attr, ok := d.GetOk("db_subnet_group_name"); ok { + createOpts.DBSubnetGroupName = aws.String(attr.(string)) + } + + log.Printf("[DEBUG] Creating RDS DB Instance opts: %s", createOpts) + resp, err := conn.CreateDBInstance(createOpts) + if err != nil { + return err + } + + d.SetId(*resp.DBInstance.DBInstanceIdentifier) + + // reuse db_instance refresh func + stateConf := &resource.StateChangeConf{ + Pending: []string{"creating", "backing-up", "modifying"}, + Target: "available", + Refresh: resourceAwsDbInstanceStateRefreshFunc(d, meta), + Timeout: 40 * time.Minute, + MinTimeout: 10 * time.Second, + Delay: 10 * time.Second, + } + + // Wait, catching any errors + _, err = stateConf.WaitForState() + if err != nil { + return err + } + + return resourceAwsRDSClusterInstanceRead(d, meta) +} + +func resourceAwsRDSClusterInstanceRead(d *schema.ResourceData, meta interface{}) error { + db, err := resourceAwsDbInstanceRetrieve(d, meta) + if err != nil { + log.Printf("[WARN] Error on retrieving RDS Cluster Instance (%s): %s", d.Id(), err) + d.SetId("") + return nil + } + + // Retreive DB Cluster information, to determine if this Instance is a writer + conn := meta.(*AWSClient).rdsconn + resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{ + DBClusterIdentifier: db.DBClusterIdentifier, + }) + + var dbc *rds.DBCluster + for _, c := range resp.DBClusters { + if *c.DBClusterIdentifier == *db.DBClusterIdentifier { + dbc = c + } + } + + if dbc == nil { + return fmt.Errorf("[WARN] Error finding RDS Cluster (%s) for Cluster Instance (%s): %s", 
+ *db.DBClusterIdentifier, *db.DBInstanceIdentifier, err) + } + + for _, m := range dbc.DBClusterMembers { + if *db.DBInstanceIdentifier == *m.DBInstanceIdentifier { + if *m.IsClusterWriter == true { + d.Set("writer", true) + } else { + d.Set("writer", false) + } + } + } + + if db.Endpoint != nil { + d.Set("endpoint", db.Endpoint.Address) + d.Set("port", db.Endpoint.Port) + } + + // Fetch and save tags + arn, err := buildRDSARN(d, meta) + if err != nil { + log.Printf("[DEBUG] Error building ARN for RDS Cluster Instance (%s), not setting Tags", *db.DBInstanceIdentifier) + } else { + if err := saveTagsRDS(conn, d, arn); err != nil { + log.Printf("[WARN] Failed to save tags for RDS Cluster Instance (%s): %s", *db.DBClusterIdentifier, err) + } + } + + return nil +} + +func resourceAwsRDSClusterInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).rdsconn + + if arn, err := buildRDSARN(d, meta); err == nil { + if err := setTagsRDS(conn, d, arn); err != nil { + return err + } + } + + return resourceAwsRDSClusterInstanceRead(d, meta) +} + +func resourceAwsRDSClusterInstanceDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).rdsconn + + log.Printf("[DEBUG] RDS Cluster Instance destroy: %v", d.Id()) + + opts := rds.DeleteDBInstanceInput{DBInstanceIdentifier: aws.String(d.Id())} + + log.Printf("[DEBUG] RDS Cluster Instance destroy configuration: %s", opts) + if _, err := conn.DeleteDBInstance(&opts); err != nil { + return err + } + + // re-uses db_instance refresh func + log.Println("[INFO] Waiting for RDS Cluster Instance to be destroyed") + stateConf := &resource.StateChangeConf{ + Pending: []string{"modifying", "deleting"}, + Target: "", + Refresh: resourceAwsDbInstanceStateRefreshFunc(d, meta), + Timeout: 40 * time.Minute, + MinTimeout: 10 * time.Second, + } + + if _, err := stateConf.WaitForState(); err != nil { + return err + } + + return nil + +} diff --git a/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go b/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go new file mode 100644 index 000000000..aff6aa786 --- /dev/null +++ b/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go @@ -0,0 +1,127 @@ +package aws + +import ( + "fmt" + "math/rand" + "strings" + "testing" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/rds" +) + +func TestAccAWSRDSClusterInstance_basic(t *testing.T) { + var v rds.DBInstance + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSClusterInstanceConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSClusterInstanceExists("aws_rds_cluster_instance.cluster_instances", &v), + testAccCheckAWSDBClusterInstanceAttributes(&v), + ), + }, + }, + }) +} + +func testAccCheckAWSClusterInstanceDestroy(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_rds_cluster" { + continue + } + + // Try to find the Group + conn := testAccProvider.Meta().(*AWSClient).rdsconn + var err error + resp, err := conn.DescribeDBInstances( + &rds.DescribeDBInstancesInput{ + DBInstanceIdentifier: aws.String(rs.Primary.ID), + }) + + if err == nil { + if len(resp.DBInstances) != 0 && + *resp.DBInstances[0].DBInstanceIdentifier == 
rs.Primary.ID { + return fmt.Errorf("DB Cluster Instance %s still exists", rs.Primary.ID) + } + } + + //check for an expected "Cluster not found" type error + return err + + } + + return nil +} + +func testAccCheckAWSDBClusterInstanceAttributes(v *rds.DBInstance) resource.TestCheckFunc { + return func(s *terraform.State) error { + + if *v.Engine != "aurora" { + return fmt.Errorf("bad engine, expected \"aurora\": %#v", *v.Engine) + } + + if !strings.HasPrefix(*v.DBClusterIdentifier, "tf-aurora-cluster") { + return fmt.Errorf("Bad Cluster Identifier prefix:\nexpected: %s\ngot: %s", "tf-aurora-cluster", *v.DBClusterIdentifier) + } + + return nil + } +} + +func testAccCheckAWSClusterInstanceExists(n string, v *rds.DBInstance) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No DB Instance ID is set") + } + + conn := testAccProvider.Meta().(*AWSClient).rdsconn + resp, err := conn.DescribeDBInstances(&rds.DescribeDBInstancesInput{ + DBInstanceIdentifier: aws.String(rs.Primary.ID), + }) + + if err != nil { + return err + } + + for _, d := range resp.DBInstances { + if *d.DBInstanceIdentifier == rs.Primary.ID { + *v = *d + return nil + } + } + + return fmt.Errorf("DB Cluster (%s) not found", rs.Primary.ID) + } +} + +// Add some random to the name, to avoid collision +var testAccAWSClusterInstanceConfig = fmt.Sprintf(` +resource "aws_rds_cluster" "default" { + cluster_identifier = "tf-aurora-cluster-test-%d" + availability_zones = ["us-west-2a","us-west-2b","us-west-2c"] + database_name = "mydb" + master_username = "foo" + master_password = "mustbeeightcharaters" +} + +resource "aws_rds_cluster_instance" "cluster_instances" { + identifier = "aurora-cluster-test-instance" + cluster_identifier = "${aws_rds_cluster.default.id}" + instance_class = "db.r3.large" +} + +`, rand.New(rand.NewSource(time.Now().UnixNano())).Int()) diff --git a/builtin/providers/aws/resource_aws_rds_cluster_test.go b/builtin/providers/aws/resource_aws_rds_cluster_test.go new file mode 100644 index 000000000..18ffa7bf2 --- /dev/null +++ b/builtin/providers/aws/resource_aws_rds_cluster_test.go @@ -0,0 +1,102 @@ +package aws + +import ( + "fmt" + "math/rand" + "testing" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/rds" +) + +func TestAccAWSRDSCluster_basic(t *testing.T) { + var v rds.DBCluster + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSClusterConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSClusterExists("aws_rds_cluster.default", &v), + ), + }, + }, + }) +} + +func testAccCheckAWSClusterDestroy(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_rds_cluster" { + continue + } + + // Try to find the Group + conn := testAccProvider.Meta().(*AWSClient).rdsconn + var err error + resp, err := conn.DescribeDBClusters( + &rds.DescribeDBClustersInput{ + DBClusterIdentifier: aws.String(rs.Primary.ID), + }) + + if err == nil { + if len(resp.DBClusters) != 0 && + *resp.DBClusters[0].DBClusterIdentifier == rs.Primary.ID { + return fmt.Errorf("DB Cluster %s still exists", rs.Primary.ID) + } + } + + // 
check for an expected "Cluster not found" type error + return err + + } + + return nil +} + +func testAccCheckAWSClusterExists(n string, v *rds.DBCluster) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No DB Instance ID is set") + } + + conn := testAccProvider.Meta().(*AWSClient).rdsconn + resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{ + DBClusterIdentifier: aws.String(rs.Primary.ID), + }) + + if err != nil { + return err + } + + for _, c := range resp.DBClusters { + if *c.DBClusterIdentifier == rs.Primary.ID { + *v = *c + return nil + } + } + + return fmt.Errorf("DB Cluster (%s) not found", rs.Primary.ID) + } +} + +// Add some random to the name, to avoid collision +var testAccAWSClusterConfig = fmt.Sprintf(` +resource "aws_rds_cluster" "default" { + cluster_identifier = "tf-aurora-cluster-%d" + availability_zones = ["us-west-2a","us-west-2b","us-west-2c"] + database_name = "mydb" + master_username = "foo" + master_password = "mustbeeightcharaters" +}`, rand.New(rand.NewSource(time.Now().UnixNano())).Int()) diff --git a/builtin/providers/aws/structure.go b/builtin/providers/aws/structure.go index d736e0ad5..d8dd6af65 100644 --- a/builtin/providers/aws/structure.go +++ b/builtin/providers/aws/structure.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/json" "fmt" + "regexp" "sort" "strings" @@ -457,3 +458,24 @@ func expandResourceRecords(recs []interface{}, typeStr string) []*route53.Resour } return records } + +func validateRdsId(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "only lowercase alphanumeric characters and hyphens allowed in %q", k)) + } + if !regexp.MustCompile(`^[a-z]`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "first character of %q must be a letter", k)) + } + if regexp.MustCompile(`--`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q cannot contain two consecutive hyphens", k)) + } + if regexp.MustCompile(`-$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q cannot end with a hyphen", k)) + } + return +} diff --git a/builtin/providers/aws/tagsRDS.go b/builtin/providers/aws/tagsRDS.go index 3e4e0c700..7ba0ee903 100644 --- a/builtin/providers/aws/tagsRDS.go +++ b/builtin/providers/aws/tagsRDS.go @@ -1,6 +1,7 @@ package aws import ( + "fmt" "log" "github.com/aws/aws-sdk-go/aws" @@ -19,7 +20,7 @@ func setTagsRDS(conn *rds.RDS, d *schema.ResourceData, arn string) error { // Set tags if len(remove) > 0 { - log.Printf("[DEBUG] Removing tags: %#v", remove) + log.Printf("[DEBUG] Removing tags: %s", remove) k := make([]*string, len(remove), len(remove)) for i, t := range remove { k[i] = t.Key @@ -34,7 +35,7 @@ func setTagsRDS(conn *rds.RDS, d *schema.ResourceData, arn string) error { } } if len(create) > 0 { - log.Printf("[DEBUG] Creating tags: %#v", create) + log.Printf("[DEBUG] Creating tags: %s", create) _, err := conn.AddTagsToResource(&rds.AddTagsToResourceInput{ ResourceName: aws.String(arn), Tags: create, @@ -93,3 +94,20 @@ func tagsToMapRDS(ts []*rds.Tag) map[string]string { return result } + +func saveTagsRDS(conn *rds.RDS, d *schema.ResourceData, arn string) error { + resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{ + ResourceName: aws.String(arn), + }) + + if err != nil { + return 
fmt.Errorf("[DEBUG] Error retreiving tags for ARN: %s", arn) + } + + var dt []*rds.Tag + if len(resp.TagList) > 0 { + dt = resp.TagList + } + + return d.Set("tags", tagsToMapRDS(dt)) +} diff --git a/website/Gemfile.lock b/website/Gemfile.lock index fac790740..cff5dfa3e 100644 --- a/website/Gemfile.lock +++ b/website/Gemfile.lock @@ -186,6 +186,3 @@ PLATFORMS DEPENDENCIES middleman-hashicorp! - -BUNDLED WITH - 1.10.6 diff --git a/website/source/docs/providers/aws/r/rds_cluster.html.markdown b/website/source/docs/providers/aws/r/rds_cluster.html.markdown new file mode 100644 index 000000000..2490e8529 --- /dev/null +++ b/website/source/docs/providers/aws/r/rds_cluster.html.markdown @@ -0,0 +1,85 @@ +--- +layout: "aws" +page_title: "AWS: aws_rds_cluster" +sidebar_current: "docs-aws-resource-rds-cluster" +description: |- + Provides an RDS Cluster Resource +--- + +# aws\_rds\_cluster + +Provides an RDS Cluster Resource. A Cluster Resource defines attributes that are +applied to the entire cluster of [RDS Cluster Instances][3]. Use the RDS Cluster +resource and RDS Cluster Instances to create and use Amazon Aurora, a MySQL-compatible +database engine. + +For more information on Amazon Aurora, see [Aurora on Amazon RDS][2] in the Amazon RDS User Guide. + +## Example Usage + +``` +resource "aws_rds_cluster" "default" { + cluster_identifier = "aurora-cluster-demo" + availability_zones = ["us-west-2a","us-west-2b","us-west-2c"] + database_name = "mydb" + master_username = "foo" + master_password = "bar" +} +``` + +~> **NOTE:** RDS Clusters resources that are created without any matching +RDS Cluster Instances do not currently display in the AWS Console. + +## Argument Reference + +For more detailed documentation about each argument, refer to +the [AWS official documentation](http://docs.aws.amazon.com/AmazonRDS/latest/CommandLineReference/CLIReference-cmd-ModifyDBInstance.html). + +The following arguments are supported: + +* `cluster_identifier` - (Required) The Cluster Identifier. Must be a lower case +string. +* `database_name` - (Optional) The name for your database of up to 8 alpha-numeric + characters. If you do not provide a name, Amazon RDS will not create a + database in the DB cluster you are creating +* `master_password` - (Required) Password for the master DB user. Note that this may + show up in logs, and it will be stored in the state file +* `master_username` - (Required) Username for the master DB user +* `availability_zones` - (Optional) A list of EC2 Availability Zones that + instances in the DB cluster can be created in +* `backup_retention_period` - (Optional) The days to retain backups for. Default +1 +* `port` - (Optional) The port on which the DB accepts connections +* `vpc_security_group_ids` - (Optional) List of VPC security groups to associate + with the Cluster +* `apply_immediately` - (Optional) Specifies whether any cluster modifications + are applied immediately, or during the next maintenance window. Default is + `false`. See [Amazon RDS Documentation for more information.](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html) +* `vpc_security_group_ids` - (Optional) List of VPC security groups to associate. + +## Attributes Reference + +The following attributes are exported: + +* `id` - The RDS Cluster Identifier +* `cluster_identifier` - The RDS Cluster Identifier +* `cluster_members` – List of RDS Instances that are a part of this cluster +* `address` - The address of the RDS instance. 
+* `allocated_storage` - The amount of allocated storage
+* `availability_zones` - The availability zone of the instance
+* `backup_retention_period` - The backup retention period
+* `backup_window` - The backup window
+* `endpoint` - The primary, writeable connection endpoint
+* `engine` - The database engine
+* `engine_version` - The database engine version
+* `maintenance_window` - The instance maintenance window
+* `database_name` - The database name
+* `port` - The database port
+* `status` - The RDS instance status
+* `username` - The master username for the database
+* `storage_encrypted` - Specifies whether the DB instance is encrypted
+
+[1]: http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Replication.html
+
+[2]: http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html
+[3]: /docs/providers/aws/r/rds_cluster_instance.html
diff --git a/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown b/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown
new file mode 100644
index 000000000..49769a393
--- /dev/null
+++ b/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown
@@ -0,0 +1,87 @@
+---
+layout: "aws"
+page_title: "AWS: aws_rds_cluster_instance"
+sidebar_current: "docs-aws-resource-rds-cluster-instance"
+description: |-
+  Provides an RDS Cluster Instance Resource
+---
+
+# aws\_rds\_cluster\_instance
+
+Provides an RDS Cluster Instance Resource. A Cluster Instance Resource defines
+attributes that are specific to a single instance in an [RDS Cluster][3],
+specifically running Amazon Aurora.
+
+Unlike other RDS resources that support replication, with Amazon Aurora you do
+not designate a primary and subsequent replicas. Instead, you simply add RDS
+Instances and Aurora manages the replication. You can use the [count][5]
+meta-parameter to make multiple instances and join them all to the same RDS
+Cluster, or you may specify different Cluster Instance resources with various
+`instance_class` sizes. A short sketch of pairing `count` with unique
+instance identifiers follows the argument list below.
+
+For more information on Amazon Aurora, see [Aurora on Amazon RDS][2] in the Amazon RDS User Guide.
+
+## Example Usage
+
+```
+resource "aws_rds_cluster_instance" "cluster_instances" {
+  count = 2
+  identifier = "aurora-cluster-demo"
+  cluster_identifier = "${aws_rds_cluster.default.id}"
+  instance_class = "db.r3.large"
+}
+
+resource "aws_rds_cluster" "default" {
+  cluster_identifier = "aurora-cluster-demo"
+  availability_zones = ["us-west-2a","us-west-2b","us-west-2c"]
+  database_name = "mydb"
+  master_username = "foo"
+  master_password = "bar"
+}
+```
+
+## Argument Reference
+
+For more detailed documentation about each argument, refer to
+the [AWS official documentation](http://docs.aws.amazon.com/AmazonRDS/latest/CommandLineReference/CLIReference-cmd-ModifyDBInstance.html).
+
+The following arguments are supported:
+
+* `identifier` - (Required) The Instance Identifier. Must be a lower case
+string.
+* `cluster_identifier` - (Required) The Cluster Identifier for this Instance to
+join. Must be a lower case
+string.
+* `instance_class` - (Required) The instance class to use. For details on CPU
+and memory, see [Scaling Aurora DB Instances][4]. Aurora currently
+  supports the below instance classes.
+  - db.r3.large
+  - db.r3.xlarge
+  - db.r3.2xlarge
+  - db.r3.4xlarge
+  - db.r3.8xlarge
+
+.
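+
+When `count` is greater than one, each instance needs its own
+`identifier`. As a minimal sketch (the naming scheme here is only an
+illustration), `count.index` can be interpolated into the identifier:
+
+```
+resource "aws_rds_cluster_instance" "cluster_instances" {
+  count = 2
+  identifier = "${format("aurora-cluster-demo-%d", count.index)}"
+  cluster_identifier = "${aws_rds_cluster.default.id}"
+  instance_class = "db.r3.large"
+}
+```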
+ +## Attributes Reference + +The following attributes are exported: + +* `cluster_identifier` - The RDS Cluster Identifier +* `identifier` - The Instance identifier +* `id` - The Instance identifier +* `writer` – Boolean indicating if this instance is writable. `False` indicates +this instance is a read replica +* `allocated_storage` - The amount of allocated storage +* `availability_zones` - The availability zone of the instance +* `endpoint` - The IP address for this instance. May not be writable +* `engine` - The database engine +* `engine_version` - The database engine version +* `database_name` - The database name +* `port` - The database port +* `status` - The RDS instance status + +[2]: http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html +[3]: /docs/providers/aws/r/rds_cluster.html +[4]: http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Aurora.Managing.html +[5]: /docs/configuration/resources.html#count diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb index 296463206..33a0cf8b1 100644 --- a/website/source/layouts/aws.erb +++ b/website/source/layouts/aws.erb @@ -306,8 +306,7 @@ - - > + > RDS Resources From 77d8f873087febeb6a148196b9b2ade546f13d30 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Tue, 6 Oct 2015 09:08:35 -0500 Subject: [PATCH 100/220] add publicly_accessible, update docs --- .../providers/aws/resource_aws_rds_cluster_instance.go | 10 ++++++++++ .../docs/providers/aws/r/rds_cluster.html.markdown | 1 - .../providers/aws/r/rds_cluster_instance.html.markdown | 6 ++++-- 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/builtin/providers/aws/resource_aws_rds_cluster_instance.go b/builtin/providers/aws/resource_aws_rds_cluster_instance.go index 27a82b897..df4bc1f5f 100644 --- a/builtin/providers/aws/resource_aws_rds_cluster_instance.go +++ b/builtin/providers/aws/resource_aws_rds_cluster_instance.go @@ -54,6 +54,13 @@ func resourceAwsRDSClusterInstance() *schema.Resource { Computed: true, }, + "publicly_accessible": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + "instance_class": &schema.Schema{ Type: schema.TypeString, Required: true, @@ -73,6 +80,7 @@ func resourceAwsRDSClusterInstanceCreate(d *schema.ResourceData, meta interface{ DBInstanceClass: aws.String(d.Get("instance_class").(string)), DBClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)), Engine: aws.String("aurora"), + PubliclyAccessible: aws.Bool(d.Get("publicly_accessible").(bool)), Tags: tags, } @@ -154,6 +162,8 @@ func resourceAwsRDSClusterInstanceRead(d *schema.ResourceData, meta interface{}) d.Set("port", db.Endpoint.Port) } + d.Set("publicly_accessible", db.PubliclyAccessible) + // Fetch and save tags arn, err := buildRDSARN(d, meta) if err != nil { diff --git a/website/source/docs/providers/aws/r/rds_cluster.html.markdown b/website/source/docs/providers/aws/r/rds_cluster.html.markdown index 2490e8529..64870b889 100644 --- a/website/source/docs/providers/aws/r/rds_cluster.html.markdown +++ b/website/source/docs/providers/aws/r/rds_cluster.html.markdown @@ -55,7 +55,6 @@ string. * `apply_immediately` - (Optional) Specifies whether any cluster modifications are applied immediately, or during the next maintenance window. Default is `false`. See [Amazon RDS Documentation for more information.](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html) -* `vpc_security_group_ids` - (Optional) List of VPC security groups to associate. 
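+
+For example, to push a master password change out immediately instead of
+waiting for the next maintenance window (a sketch; the password value is
+only a placeholder):
+
+```
+resource "aws_rds_cluster" "default" {
+  # ... other arguments ...
+  master_password = "updatedpassword"
+  apply_immediately = true
+}
+```
+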
## Attributes Reference diff --git a/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown b/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown index 49769a393..1571beab7 100644 --- a/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown +++ b/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown @@ -60,8 +60,9 @@ and memory, see [Scaling Aurora DB Instances][4]. Aurora currently - db.r3.2xlarge - db.r3.4xlarge - db.r3.8xlarge - -. +* `publicly_accessible` - (Optional) Bool to control if instance is publicly accessible. +Default `false`. See the documentation on [Creating DB Instances][6] for more +details on controlling this property. ## Attributes Reference @@ -85,3 +86,4 @@ this instance is a read replica [3]: /docs/providers/aws/r/rds_cluster.html [4]: http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Aurora.Managing.html [5]: /docs/configuration/resources.html#count +[6]: http://docs.aws.amazon.com/fr_fr/AmazonRDS/latest/APIReference/API_CreateDBInstance.html From 70841285c26167515a4b66c474cd8789882f1e29 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Wed, 7 Oct 2015 11:04:34 -0500 Subject: [PATCH 101/220] Update RDS Cluster for final snapshot, update tests/docs --- .../providers/aws/resource_aws_rds_cluster.go | 38 ++++++++++++++++--- .../resource_aws_rds_cluster_instance_test.go | 9 ++++- .../aws/resource_aws_rds_cluster_test.go | 10 ++++- .../providers/aws/r/rds_cluster.html.markdown | 3 ++ .../aws/r/rds_cluster_instance.html.markdown | 4 +- 5 files changed, 54 insertions(+), 10 deletions(-) diff --git a/builtin/providers/aws/resource_aws_rds_cluster.go b/builtin/providers/aws/resource_aws_rds_cluster.go index 0e3d9339a..897fe31ed 100644 --- a/builtin/providers/aws/resource_aws_rds_cluster.go +++ b/builtin/providers/aws/resource_aws_rds_cluster.go @@ -3,6 +3,7 @@ package aws import ( "fmt" "log" + "regexp" "time" "github.com/aws/aws-sdk-go/aws" @@ -69,6 +70,25 @@ func resourceAwsRDSCluster() *schema.Resource { Computed: true, }, + "final_snapshot_identifier": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + value := v.(string) + if !regexp.MustCompile(`^[0-9A-Za-z-]+$`).MatchString(value) { + es = append(es, fmt.Errorf( + "only alphanumeric characters and hyphens allowed in %q", k)) + } + if regexp.MustCompile(`--`).MatchString(value) { + es = append(es, fmt.Errorf("%q cannot contain two consecutive hyphens", k)) + } + if regexp.MustCompile(`-$`).MatchString(value) { + es = append(es, fmt.Errorf("%q cannot end in a hyphen", k)) + } + return + }, + }, + "master_username": &schema.Schema{ Type: schema.TypeString, Required: true, @@ -167,7 +187,6 @@ func resourceAwsRDSClusterRead(d *schema.ResourceData, meta interface{}) error { resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{ DBClusterIdentifier: aws.String(d.Id()), - // final snapshot identifier }) if err != nil { @@ -256,11 +275,20 @@ func resourceAwsRDSClusterDelete(d *schema.ResourceData, meta interface{}) error conn := meta.(*AWSClient).rdsconn log.Printf("[DEBUG] Destroying RDS Cluster (%s)", d.Id()) - _, err := conn.DeleteDBCluster(&rds.DeleteDBClusterInput{ + deleteOpts := rds.DeleteDBClusterInput{ DBClusterIdentifier: aws.String(d.Id()), - SkipFinalSnapshot: aws.Bool(true), - // final snapshot identifier - }) + } + + finalSnapshot := d.Get("final_snapshot_identifier").(string) + if finalSnapshot == "" { + deleteOpts.SkipFinalSnapshot = 
aws.Bool(true) + } else { + deleteOpts.FinalDBSnapshotIdentifier = aws.String(finalSnapshot) + deleteOpts.SkipFinalSnapshot = aws.Bool(false) + } + + log.Printf("[DEBUG] RDS Cluster delete options: %s", deleteOpts) + _, err := conn.DeleteDBCluster(&deleteOpts) stateConf := &resource.StateChangeConf{ Pending: []string{"deleting", "backing-up", "modifying"}, diff --git a/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go b/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go index aff6aa786..f923c1712 100644 --- a/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go +++ b/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go @@ -11,6 +11,7 @@ import ( "github.com/hashicorp/terraform/terraform" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/rds" ) @@ -54,7 +55,13 @@ func testAccCheckAWSClusterInstanceDestroy(s *terraform.State) error { } } - //check for an expected "Cluster not found" type error + // Return nil if the Cluster Instance is already destroyed + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "DBInstanceNotFound" { + return nil + } + } + return err } diff --git a/builtin/providers/aws/resource_aws_rds_cluster_test.go b/builtin/providers/aws/resource_aws_rds_cluster_test.go index 18ffa7bf2..2fd768949 100644 --- a/builtin/providers/aws/resource_aws_rds_cluster_test.go +++ b/builtin/providers/aws/resource_aws_rds_cluster_test.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/terraform/terraform" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/rds" ) @@ -52,9 +53,14 @@ func testAccCheckAWSClusterDestroy(s *terraform.State) error { } } - // check for an expected "Cluster not found" type error - return err + // Return nil if the cluster is already destroyed + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "DBClusterNotFound" { + return nil + } + } + return err } return nil diff --git a/website/source/docs/providers/aws/r/rds_cluster.html.markdown b/website/source/docs/providers/aws/r/rds_cluster.html.markdown index 64870b889..c45814f46 100644 --- a/website/source/docs/providers/aws/r/rds_cluster.html.markdown +++ b/website/source/docs/providers/aws/r/rds_cluster.html.markdown @@ -45,6 +45,9 @@ string. * `master_password` - (Required) Password for the master DB user. Note that this may show up in logs, and it will be stored in the state file * `master_username` - (Required) Username for the master DB user +* `final_snapshot_identifier` - (Optional) The name of your final DB snapshot + when this DB cluster is deleted. If omitted, no final snapshot will be + made. * `availability_zones` - (Optional) A list of EC2 Availability Zones that instances in the DB cluster can be created in * `backup_retention_period` - (Optional) The days to retain backups for. Default diff --git a/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown b/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown index 1571beab7..76792cc51 100644 --- a/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown +++ b/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown @@ -47,8 +47,8 @@ the [AWS official documentation](http://docs.aws.amazon.com/AmazonRDS/latest/Com The following arguments are supported: -* `identifier` - (Required) The Instance Identifier. Must be a lower case -string. +* `identifier` - (Optional) The Instance Identifier. 
Must be a lower case +string. If omited, a unique identifier will be generated. * `cluster_identifier` - (Required) The Cluster Identifier for this Instance to join. Must be a lower case string. From 7abe2a10e7a16046c1dfde831a0a3678180274f8 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Wed, 7 Oct 2015 11:24:00 -0500 Subject: [PATCH 102/220] Fix spellng errorr --- .../docs/providers/aws/r/rds_cluster_instance.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown b/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown index 76792cc51..782339a34 100644 --- a/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown +++ b/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown @@ -48,7 +48,7 @@ the [AWS official documentation](http://docs.aws.amazon.com/AmazonRDS/latest/Com The following arguments are supported: * `identifier` - (Optional) The Instance Identifier. Must be a lower case -string. If omited, a unique identifier will be generated. +string. If omitted, a unique identifier will be generated. * `cluster_identifier` - (Required) The Cluster Identifier for this Instance to join. Must be a lower case string. From 71b1cb1289f9f35258e43e84025552f46e5f99a5 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Wed, 7 Oct 2015 11:27:24 -0500 Subject: [PATCH 103/220] go fmt after rebase --- .../providers/aws/resource_aws_rds_cluster.go | 548 +++++++++--------- .../aws/resource_aws_rds_cluster_instance.go | 332 +++++------ .../resource_aws_rds_cluster_instance_test.go | 166 +++--- .../aws/resource_aws_rds_cluster_test.go | 144 ++--- 4 files changed, 595 insertions(+), 595 deletions(-) diff --git a/builtin/providers/aws/resource_aws_rds_cluster.go b/builtin/providers/aws/resource_aws_rds_cluster.go index 897fe31ed..57f3a27b3 100644 --- a/builtin/providers/aws/resource_aws_rds_cluster.go +++ b/builtin/providers/aws/resource_aws_rds_cluster.go @@ -1,347 +1,347 @@ package aws import ( - "fmt" - "log" - "regexp" - "time" + "fmt" + "log" + "regexp" + "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/rds" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" ) func resourceAwsRDSCluster() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsRDSClusterCreate, - Read: resourceAwsRDSClusterRead, - Update: resourceAwsRDSClusterUpdate, - Delete: resourceAwsRDSClusterDelete, + return &schema.Resource{ + Create: resourceAwsRDSClusterCreate, + Read: resourceAwsRDSClusterRead, + Update: resourceAwsRDSClusterUpdate, + Delete: resourceAwsRDSClusterDelete, - Schema: map[string]*schema.Schema{ + Schema: map[string]*schema.Schema{ - "availability_zones": &schema.Schema{ - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - ForceNew: true, - Computed: true, - Set: schema.HashString, - }, + "availability_zones": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + ForceNew: true, + Computed: true, + Set: schema.HashString, + }, - "cluster_identifier": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - 
ValidateFunc: validateRdsId, - }, + "cluster_identifier": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateRdsId, + }, - "cluster_members": &schema.Schema{ - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - Computed: true, - Set: schema.HashString, - }, + "cluster_members": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + Computed: true, + Set: schema.HashString, + }, - "database_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, + "database_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, - "db_subnet_group_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, + "db_subnet_group_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, - "endpoint": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, + "endpoint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, - "engine": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, + "engine": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, - "final_snapshot_identifier": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - value := v.(string) - if !regexp.MustCompile(`^[0-9A-Za-z-]+$`).MatchString(value) { - es = append(es, fmt.Errorf( - "only alphanumeric characters and hyphens allowed in %q", k)) - } - if regexp.MustCompile(`--`).MatchString(value) { - es = append(es, fmt.Errorf("%q cannot contain two consecutive hyphens", k)) - } - if regexp.MustCompile(`-$`).MatchString(value) { - es = append(es, fmt.Errorf("%q cannot end in a hyphen", k)) - } - return - }, - }, + "final_snapshot_identifier": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + value := v.(string) + if !regexp.MustCompile(`^[0-9A-Za-z-]+$`).MatchString(value) { + es = append(es, fmt.Errorf( + "only alphanumeric characters and hyphens allowed in %q", k)) + } + if regexp.MustCompile(`--`).MatchString(value) { + es = append(es, fmt.Errorf("%q cannot contain two consecutive hyphens", k)) + } + if regexp.MustCompile(`-$`).MatchString(value) { + es = append(es, fmt.Errorf("%q cannot end in a hyphen", k)) + } + return + }, + }, - "master_username": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, + "master_username": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, - "master_password": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, + "master_password": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, - "port": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, + "port": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, - // apply_immediately is used to determine when the update modifications - // take place. - // See http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html - "apply_immediately": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, + // apply_immediately is used to determine when the update modifications + // take place. 
+ // See http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html + "apply_immediately": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, - "vpc_security_group_ids": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - } + "vpc_security_group_ids": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + } } func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).rdsconn + conn := meta.(*AWSClient).rdsconn - createOpts := &rds.CreateDBClusterInput{ - DBClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)), - Engine: aws.String("aurora"), - MasterUserPassword: aws.String(d.Get("master_password").(string)), - MasterUsername: aws.String(d.Get("master_username").(string)), - } + createOpts := &rds.CreateDBClusterInput{ + DBClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)), + Engine: aws.String("aurora"), + MasterUserPassword: aws.String(d.Get("master_password").(string)), + MasterUsername: aws.String(d.Get("master_username").(string)), + } - if v := d.Get("database_name"); v.(string) != "" { - createOpts.DatabaseName = aws.String(v.(string)) - } + if v := d.Get("database_name"); v.(string) != "" { + createOpts.DatabaseName = aws.String(v.(string)) + } - if attr, ok := d.GetOk("port"); ok { - createOpts.Port = aws.Int64(int64(attr.(int))) - } + if attr, ok := d.GetOk("port"); ok { + createOpts.Port = aws.Int64(int64(attr.(int))) + } - if attr, ok := d.GetOk("db_subnet_group_name"); ok { - createOpts.DBSubnetGroupName = aws.String(attr.(string)) - } + if attr, ok := d.GetOk("db_subnet_group_name"); ok { + createOpts.DBSubnetGroupName = aws.String(attr.(string)) + } - if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { - createOpts.VpcSecurityGroupIds = expandStringList(attr.List()) - } + if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { + createOpts.VpcSecurityGroupIds = expandStringList(attr.List()) + } - if attr := d.Get("availability_zones").(*schema.Set); attr.Len() > 0 { - createOpts.AvailabilityZones = expandStringList(attr.List()) - } + if attr := d.Get("availability_zones").(*schema.Set); attr.Len() > 0 { + createOpts.AvailabilityZones = expandStringList(attr.List()) + } - log.Printf("[DEBUG] RDS Cluster create options: %s", createOpts) - resp, err := conn.CreateDBCluster(createOpts) - if err != nil { - log.Printf("[ERROR] Error creating RDS Cluster: %s", err) - return err - } + log.Printf("[DEBUG] RDS Cluster create options: %s", createOpts) + resp, err := conn.CreateDBCluster(createOpts) + if err != nil { + log.Printf("[ERROR] Error creating RDS Cluster: %s", err) + return err + } - log.Printf("[DEBUG]: Cluster create response: %s", resp) - d.SetId(*resp.DBCluster.DBClusterIdentifier) - stateConf := &resource.StateChangeConf{ - Pending: []string{"creating", "backing-up", "modifying"}, - Target: "available", - Refresh: resourceAwsRDSClusterStateRefreshFunc(d, meta), - Timeout: 5 * time.Minute, - MinTimeout: 3 * time.Second, - } + log.Printf("[DEBUG]: Cluster create response: %s", resp) + d.SetId(*resp.DBCluster.DBClusterIdentifier) + stateConf := &resource.StateChangeConf{ + Pending: []string{"creating", "backing-up", "modifying"}, + Target: "available", + Refresh: 
resourceAwsRDSClusterStateRefreshFunc(d, meta), + Timeout: 5 * time.Minute, + MinTimeout: 3 * time.Second, + } - // Wait, catching any errors - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("[WARN] Error waiting for RDS Cluster state to be \"available\": %s", err) - } + // Wait, catching any errors + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("[WARN] Error waiting for RDS Cluster state to be \"available\": %s", err) + } - return resourceAwsRDSClusterRead(d, meta) + return resourceAwsRDSClusterRead(d, meta) } func resourceAwsRDSClusterRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).rdsconn + conn := meta.(*AWSClient).rdsconn - resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{ - DBClusterIdentifier: aws.String(d.Id()), - }) + resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{ + DBClusterIdentifier: aws.String(d.Id()), + }) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if "DBClusterNotFoundFault" == awsErr.Code() { - d.SetId("") - log.Printf("[DEBUG] RDS Cluster (%s) not found", d.Id()) - return nil - } - } - log.Printf("[DEBUG] Error describing RDS Cluster (%s)", d.Id()) - return err - } + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if "DBClusterNotFoundFault" == awsErr.Code() { + d.SetId("") + log.Printf("[DEBUG] RDS Cluster (%s) not found", d.Id()) + return nil + } + } + log.Printf("[DEBUG] Error describing RDS Cluster (%s)", d.Id()) + return err + } - var dbc *rds.DBCluster - for _, c := range resp.DBClusters { - if *c.DBClusterIdentifier == d.Id() { - dbc = c - } - } + var dbc *rds.DBCluster + for _, c := range resp.DBClusters { + if *c.DBClusterIdentifier == d.Id() { + dbc = c + } + } - if dbc == nil { - log.Printf("[WARN] RDS Cluster (%s) not found", d.Id()) - d.SetId("") - return nil - } + if dbc == nil { + log.Printf("[WARN] RDS Cluster (%s) not found", d.Id()) + d.SetId("") + return nil + } - if err := d.Set("availability_zones", aws.StringValueSlice(dbc.AvailabilityZones)); err != nil { - return fmt.Errorf("[DEBUG] Error saving AvailabilityZones to state for RDS Cluster (%s): %s", d.Id(), err) - } - d.Set("database_name", dbc.DatabaseName) - d.Set("db_subnet_group_name", dbc.DBSubnetGroup) - d.Set("endpoint", dbc.Endpoint) - d.Set("engine", dbc.Engine) - d.Set("master_username", dbc.MasterUsername) - d.Set("port", dbc.Port) + if err := d.Set("availability_zones", aws.StringValueSlice(dbc.AvailabilityZones)); err != nil { + return fmt.Errorf("[DEBUG] Error saving AvailabilityZones to state for RDS Cluster (%s): %s", d.Id(), err) + } + d.Set("database_name", dbc.DatabaseName) + d.Set("db_subnet_group_name", dbc.DBSubnetGroup) + d.Set("endpoint", dbc.Endpoint) + d.Set("engine", dbc.Engine) + d.Set("master_username", dbc.MasterUsername) + d.Set("port", dbc.Port) - var vpcg []string - for _, g := range dbc.VpcSecurityGroups { - vpcg = append(vpcg, *g.VpcSecurityGroupId) - } - if err := d.Set("vpc_security_group_ids", vpcg); err != nil { - return fmt.Errorf("[DEBUG] Error saving VPC Security Group IDs to state for RDS Cluster (%s): %s", d.Id(), err) - } + var vpcg []string + for _, g := range dbc.VpcSecurityGroups { + vpcg = append(vpcg, *g.VpcSecurityGroupId) + } + if err := d.Set("vpc_security_group_ids", vpcg); err != nil { + return fmt.Errorf("[DEBUG] Error saving VPC Security Group IDs to state for RDS Cluster (%s): %s", d.Id(), err) + } - var cm []string - for _, m := range dbc.DBClusterMembers { - cm = append(cm, 
*m.DBInstanceIdentifier) - } - if err := d.Set("cluster_members", cm); err != nil { - return fmt.Errorf("[DEBUG] Error saving RDS Cluster Members to state for RDS Cluster (%s): %s", d.Id(), err) - } + var cm []string + for _, m := range dbc.DBClusterMembers { + cm = append(cm, *m.DBInstanceIdentifier) + } + if err := d.Set("cluster_members", cm); err != nil { + return fmt.Errorf("[DEBUG] Error saving RDS Cluster Members to state for RDS Cluster (%s): %s", d.Id(), err) + } - return nil + return nil } func resourceAwsRDSClusterUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).rdsconn + conn := meta.(*AWSClient).rdsconn - req := &rds.ModifyDBClusterInput{ - ApplyImmediately: aws.Bool(d.Get("apply_immediately").(bool)), - DBClusterIdentifier: aws.String(d.Id()), - } + req := &rds.ModifyDBClusterInput{ + ApplyImmediately: aws.Bool(d.Get("apply_immediately").(bool)), + DBClusterIdentifier: aws.String(d.Id()), + } - if d.HasChange("master_password") { - req.MasterUserPassword = aws.String(d.Get("master_password").(string)) - } + if d.HasChange("master_password") { + req.MasterUserPassword = aws.String(d.Get("master_password").(string)) + } - if d.HasChange("vpc_security_group_ids") { - if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { - req.VpcSecurityGroupIds = expandStringList(attr.List()) - } else { - req.VpcSecurityGroupIds = []*string{} - } - } + if d.HasChange("vpc_security_group_ids") { + if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { + req.VpcSecurityGroupIds = expandStringList(attr.List()) + } else { + req.VpcSecurityGroupIds = []*string{} + } + } - _, err := conn.ModifyDBCluster(req) - if err != nil { - return fmt.Errorf("[WARN] Error modifying RDS Cluster (%s): %s", d.Id(), err) - } + _, err := conn.ModifyDBCluster(req) + if err != nil { + return fmt.Errorf("[WARN] Error modifying RDS Cluster (%s): %s", d.Id(), err) + } - return resourceAwsRDSClusterRead(d, meta) + return resourceAwsRDSClusterRead(d, meta) } func resourceAwsRDSClusterDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).rdsconn - log.Printf("[DEBUG] Destroying RDS Cluster (%s)", d.Id()) + conn := meta.(*AWSClient).rdsconn + log.Printf("[DEBUG] Destroying RDS Cluster (%s)", d.Id()) - deleteOpts := rds.DeleteDBClusterInput{ - DBClusterIdentifier: aws.String(d.Id()), - } + deleteOpts := rds.DeleteDBClusterInput{ + DBClusterIdentifier: aws.String(d.Id()), + } - finalSnapshot := d.Get("final_snapshot_identifier").(string) - if finalSnapshot == "" { - deleteOpts.SkipFinalSnapshot = aws.Bool(true) - } else { - deleteOpts.FinalDBSnapshotIdentifier = aws.String(finalSnapshot) - deleteOpts.SkipFinalSnapshot = aws.Bool(false) - } + finalSnapshot := d.Get("final_snapshot_identifier").(string) + if finalSnapshot == "" { + deleteOpts.SkipFinalSnapshot = aws.Bool(true) + } else { + deleteOpts.FinalDBSnapshotIdentifier = aws.String(finalSnapshot) + deleteOpts.SkipFinalSnapshot = aws.Bool(false) + } - log.Printf("[DEBUG] RDS Cluster delete options: %s", deleteOpts) - _, err := conn.DeleteDBCluster(&deleteOpts) + log.Printf("[DEBUG] RDS Cluster delete options: %s", deleteOpts) + _, err := conn.DeleteDBCluster(&deleteOpts) - stateConf := &resource.StateChangeConf{ - Pending: []string{"deleting", "backing-up", "modifying"}, - Target: "destroyed", - Refresh: resourceAwsRDSClusterStateRefreshFunc(d, meta), - Timeout: 5 * time.Minute, - MinTimeout: 3 * time.Second, - } + stateConf := &resource.StateChangeConf{ + Pending: 
[]string{"deleting", "backing-up", "modifying"}, + Target: "destroyed", + Refresh: resourceAwsRDSClusterStateRefreshFunc(d, meta), + Timeout: 5 * time.Minute, + MinTimeout: 3 * time.Second, + } - // Wait, catching any errors - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("[WARN] Error deleting RDS Cluster (%s): %s", d.Id(), err) - } + // Wait, catching any errors + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("[WARN] Error deleting RDS Cluster (%s): %s", d.Id(), err) + } - return nil + return nil } func resourceAwsRDSClusterStateRefreshFunc( - d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - conn := meta.(*AWSClient).rdsconn + d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + conn := meta.(*AWSClient).rdsconn - resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{ - DBClusterIdentifier: aws.String(d.Id()), - }) + resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{ + DBClusterIdentifier: aws.String(d.Id()), + }) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if "DBClusterNotFoundFault" == awsErr.Code() { - return 42, "destroyed", nil - } - } - log.Printf("[WARN] Error on retrieving DB Cluster (%s) when waiting: %s", d.Id(), err) - return nil, "", err - } + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if "DBClusterNotFoundFault" == awsErr.Code() { + return 42, "destroyed", nil + } + } + log.Printf("[WARN] Error on retrieving DB Cluster (%s) when waiting: %s", d.Id(), err) + return nil, "", err + } - var dbc *rds.DBCluster + var dbc *rds.DBCluster - for _, c := range resp.DBClusters { - if *c.DBClusterIdentifier == d.Id() { - dbc = c - } - } + for _, c := range resp.DBClusters { + if *c.DBClusterIdentifier == d.Id() { + dbc = c + } + } - if dbc == nil { - return 42, "destroyed", nil - } + if dbc == nil { + return 42, "destroyed", nil + } - if dbc.Status != nil { - log.Printf("[DEBUG] DB Cluster status (%s): %s", d.Id(), *dbc.Status) - } + if dbc.Status != nil { + log.Printf("[DEBUG] DB Cluster status (%s): %s", d.Id(), *dbc.Status) + } - return dbc, *dbc.Status, nil - } + return dbc, *dbc.Status, nil + } } diff --git a/builtin/providers/aws/resource_aws_rds_cluster_instance.go b/builtin/providers/aws/resource_aws_rds_cluster_instance.go index df4bc1f5f..bdffd59d4 100644 --- a/builtin/providers/aws/resource_aws_rds_cluster_instance.go +++ b/builtin/providers/aws/resource_aws_rds_cluster_instance.go @@ -1,220 +1,220 @@ package aws import ( - "fmt" - "log" - "time" + "fmt" + "log" + "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/rds" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" ) func resourceAwsRDSClusterInstance() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsRDSClusterInstanceCreate, - Read: resourceAwsRDSClusterInstanceRead, - Update: resourceAwsRDSClusterInstanceUpdate, - Delete: resourceAwsRDSClusterInstanceDelete, + return &schema.Resource{ + Create: resourceAwsRDSClusterInstanceCreate, + Read: resourceAwsRDSClusterInstanceRead, + Update: resourceAwsRDSClusterInstanceUpdate, + Delete: resourceAwsRDSClusterInstanceDelete, - Schema: map[string]*schema.Schema{ - 
"identifier": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateRdsId, - }, + Schema: map[string]*schema.Schema{ + "identifier": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validateRdsId, + }, - "db_subnet_group_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, + "db_subnet_group_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, - "writer": &schema.Schema{ - Type: schema.TypeBool, - Computed: true, - }, + "writer": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + }, - "cluster_identifier": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, + "cluster_identifier": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, - "endpoint": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, + "endpoint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, - "port": &schema.Schema{ - Type: schema.TypeInt, - Computed: true, - }, + "port": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + }, - "publicly_accessible": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, - }, + "publicly_accessible": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, - "instance_class": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, + "instance_class": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, - "tags": tagsSchema(), - }, - } + "tags": tagsSchema(), + }, + } } func resourceAwsRDSClusterInstanceCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).rdsconn - tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{})) + conn := meta.(*AWSClient).rdsconn + tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{})) - createOpts := &rds.CreateDBInstanceInput{ - DBInstanceClass: aws.String(d.Get("instance_class").(string)), - DBClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)), - Engine: aws.String("aurora"), - PubliclyAccessible: aws.Bool(d.Get("publicly_accessible").(bool)), - Tags: tags, - } + createOpts := &rds.CreateDBInstanceInput{ + DBInstanceClass: aws.String(d.Get("instance_class").(string)), + DBClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)), + Engine: aws.String("aurora"), + PubliclyAccessible: aws.Bool(d.Get("publicly_accessible").(bool)), + Tags: tags, + } - if v := d.Get("identifier").(string); v != "" { - createOpts.DBInstanceIdentifier = aws.String(v) - } else { - createOpts.DBInstanceIdentifier = aws.String(resource.UniqueId()) - } + if v := d.Get("identifier").(string); v != "" { + createOpts.DBInstanceIdentifier = aws.String(v) + } else { + createOpts.DBInstanceIdentifier = aws.String(resource.UniqueId()) + } - if attr, ok := d.GetOk("db_subnet_group_name"); ok { - createOpts.DBSubnetGroupName = aws.String(attr.(string)) - } + if attr, ok := d.GetOk("db_subnet_group_name"); ok { + createOpts.DBSubnetGroupName = aws.String(attr.(string)) + } - log.Printf("[DEBUG] Creating RDS DB Instance opts: %s", createOpts) - resp, err := conn.CreateDBInstance(createOpts) - if err != nil { - return err - } + log.Printf("[DEBUG] Creating RDS DB Instance opts: %s", createOpts) + resp, err := conn.CreateDBInstance(createOpts) + if err != nil { + return err 
+	}
 
-	d.SetId(*resp.DBInstance.DBInstanceIdentifier)
+	d.SetId(*resp.DBInstance.DBInstanceIdentifier)
 
-	// reuse db_instance refresh func
-	stateConf := &resource.StateChangeConf{
-		Pending:    []string{"creating", "backing-up", "modifying"},
-		Target:     "available",
-		Refresh:    resourceAwsDbInstanceStateRefreshFunc(d, meta),
-		Timeout:    40 * time.Minute,
-		MinTimeout: 10 * time.Second,
-		Delay:      10 * time.Second,
-	}
+	// reuse db_instance refresh func
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"creating", "backing-up", "modifying"},
+		Target:     "available",
+		Refresh:    resourceAwsDbInstanceStateRefreshFunc(d, meta),
+		Timeout:    40 * time.Minute,
+		MinTimeout: 10 * time.Second,
+		Delay:      10 * time.Second,
+	}
 
-	// Wait, catching any errors
-	_, err = stateConf.WaitForState()
-	if err != nil {
-		return err
-	}
+	// Wait, catching any errors
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return err
+	}
 
-	return resourceAwsRDSClusterInstanceRead(d, meta)
+	return resourceAwsRDSClusterInstanceRead(d, meta)
 }
 
 func resourceAwsRDSClusterInstanceRead(d *schema.ResourceData, meta interface{}) error {
-	db, err := resourceAwsDbInstanceRetrieve(d, meta)
-	if err != nil {
-		log.Printf("[WARN] Error on retrieving RDS Cluster Instance (%s): %s", d.Id(), err)
-		d.SetId("")
-		return nil
-	}
+	db, err := resourceAwsDbInstanceRetrieve(d, meta)
+	if err != nil {
+		log.Printf("[WARN] Error on retrieving RDS Cluster Instance (%s): %s", d.Id(), err)
+		d.SetId("")
+		return nil
+	}
 
-	// Retrieve DB Cluster information, to determine if this Instance is a writer
-	conn := meta.(*AWSClient).rdsconn
-	resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{
-		DBClusterIdentifier: db.DBClusterIdentifier,
-	})
+	// Retrieve DB Cluster information, to determine if this Instance is a writer
+	conn := meta.(*AWSClient).rdsconn
+	resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{
+		DBClusterIdentifier: db.DBClusterIdentifier,
+	})
 
-	var dbc *rds.DBCluster
-	for _, c := range resp.DBClusters {
-		if *c.DBClusterIdentifier == *db.DBClusterIdentifier {
-			dbc = c
-		}
-	}
+	var dbc *rds.DBCluster
+	for _, c := range resp.DBClusters {
+		if *c.DBClusterIdentifier == *db.DBClusterIdentifier {
+			dbc = c
+		}
+	}
 
-	if dbc == nil {
-		return fmt.Errorf("[WARN] Error finding RDS Cluster (%s) for Cluster Instance (%s): %s",
-			*db.DBClusterIdentifier, *db.DBInstanceIdentifier, err)
-	}
+	if dbc == nil {
+		return fmt.Errorf("[WARN] Error finding RDS Cluster (%s) for Cluster Instance (%s): %s",
+			*db.DBClusterIdentifier, *db.DBInstanceIdentifier, err)
+	}
 
-	for _, m := range dbc.DBClusterMembers {
-		if *db.DBInstanceIdentifier == *m.DBInstanceIdentifier {
-			if *m.IsClusterWriter == true {
-				d.Set("writer", true)
-			} else {
-				d.Set("writer", false)
-			}
-		}
-	}
+	for _, m := range dbc.DBClusterMembers {
+		if *db.DBInstanceIdentifier == *m.DBInstanceIdentifier {
+			if *m.IsClusterWriter == true {
+				d.Set("writer", true)
+			} else {
+				d.Set("writer", false)
+			}
+		}
+	}
 
-	if db.Endpoint != nil {
-		d.Set("endpoint", db.Endpoint.Address)
-		d.Set("port", db.Endpoint.Port)
-	}
+	if db.Endpoint != nil {
+		d.Set("endpoint", db.Endpoint.Address)
+		d.Set("port", db.Endpoint.Port)
+	}
 
-	d.Set("publicly_accessible", db.PubliclyAccessible)
+	d.Set("publicly_accessible", db.PubliclyAccessible)
 
-	// Fetch and save tags
-	arn, err := buildRDSARN(d, meta)
-	if err != nil {
-		log.Printf("[DEBUG] Error building ARN for RDS Cluster Instance (%s), not setting Tags", *db.DBInstanceIdentifier)
-	} else {
-		if err := saveTagsRDS(conn, d, arn); err != nil {
-			log.Printf("[WARN] Failed to save tags for RDS Cluster Instance (%s): %s", *db.DBClusterIdentifier, err)
-		}
-	}
+	// Fetch and save tags
+	arn, err := buildRDSARN(d, meta)
+	if err != nil {
+		log.Printf("[DEBUG] Error building ARN for RDS Cluster Instance (%s), not setting Tags", *db.DBInstanceIdentifier)
+	} else {
+		if err :=
saveTagsRDS(conn, d, arn); err != nil { - log.Printf("[WARN] Failed to save tags for RDS Cluster Instance (%s): %s", *db.DBClusterIdentifier, err) - } - } + // Fetch and save tags + arn, err := buildRDSARN(d, meta) + if err != nil { + log.Printf("[DEBUG] Error building ARN for RDS Cluster Instance (%s), not setting Tags", *db.DBInstanceIdentifier) + } else { + if err := saveTagsRDS(conn, d, arn); err != nil { + log.Printf("[WARN] Failed to save tags for RDS Cluster Instance (%s): %s", *db.DBClusterIdentifier, err) + } + } - return nil + return nil } func resourceAwsRDSClusterInstanceUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).rdsconn + conn := meta.(*AWSClient).rdsconn - if arn, err := buildRDSARN(d, meta); err == nil { - if err := setTagsRDS(conn, d, arn); err != nil { - return err - } - } + if arn, err := buildRDSARN(d, meta); err == nil { + if err := setTagsRDS(conn, d, arn); err != nil { + return err + } + } - return resourceAwsRDSClusterInstanceRead(d, meta) + return resourceAwsRDSClusterInstanceRead(d, meta) } func resourceAwsRDSClusterInstanceDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).rdsconn + conn := meta.(*AWSClient).rdsconn - log.Printf("[DEBUG] RDS Cluster Instance destroy: %v", d.Id()) + log.Printf("[DEBUG] RDS Cluster Instance destroy: %v", d.Id()) - opts := rds.DeleteDBInstanceInput{DBInstanceIdentifier: aws.String(d.Id())} + opts := rds.DeleteDBInstanceInput{DBInstanceIdentifier: aws.String(d.Id())} - log.Printf("[DEBUG] RDS Cluster Instance destroy configuration: %s", opts) - if _, err := conn.DeleteDBInstance(&opts); err != nil { - return err - } + log.Printf("[DEBUG] RDS Cluster Instance destroy configuration: %s", opts) + if _, err := conn.DeleteDBInstance(&opts); err != nil { + return err + } - // re-uses db_instance refresh func - log.Println("[INFO] Waiting for RDS Cluster Instance to be destroyed") - stateConf := &resource.StateChangeConf{ - Pending: []string{"modifying", "deleting"}, - Target: "", - Refresh: resourceAwsDbInstanceStateRefreshFunc(d, meta), - Timeout: 40 * time.Minute, - MinTimeout: 10 * time.Second, - } + // re-uses db_instance refresh func + log.Println("[INFO] Waiting for RDS Cluster Instance to be destroyed") + stateConf := &resource.StateChangeConf{ + Pending: []string{"modifying", "deleting"}, + Target: "", + Refresh: resourceAwsDbInstanceStateRefreshFunc(d, meta), + Timeout: 40 * time.Minute, + MinTimeout: 10 * time.Second, + } - if _, err := stateConf.WaitForState(); err != nil { - return err - } + if _, err := stateConf.WaitForState(); err != nil { + return err + } - return nil + return nil } diff --git a/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go b/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go index f923c1712..046132fad 100644 --- a/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go +++ b/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go @@ -1,118 +1,118 @@ package aws import ( - "fmt" - "math/rand" - "strings" - "testing" - "time" + "fmt" + "math/rand" + "strings" + "testing" + "time" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/rds" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + 
"github.com/aws/aws-sdk-go/service/rds" ) func TestAccAWSRDSClusterInstance_basic(t *testing.T) { - var v rds.DBInstance + var v rds.DBInstance - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSClusterDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSClusterInstanceConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSClusterInstanceExists("aws_rds_cluster_instance.cluster_instances", &v), - testAccCheckAWSDBClusterInstanceAttributes(&v), - ), - }, - }, - }) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSClusterInstanceConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSClusterInstanceExists("aws_rds_cluster_instance.cluster_instances", &v), + testAccCheckAWSDBClusterInstanceAttributes(&v), + ), + }, + }, + }) } func testAccCheckAWSClusterInstanceDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_rds_cluster" { - continue - } + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_rds_cluster" { + continue + } - // Try to find the Group - conn := testAccProvider.Meta().(*AWSClient).rdsconn - var err error - resp, err := conn.DescribeDBInstances( - &rds.DescribeDBInstancesInput{ - DBInstanceIdentifier: aws.String(rs.Primary.ID), - }) + // Try to find the Group + conn := testAccProvider.Meta().(*AWSClient).rdsconn + var err error + resp, err := conn.DescribeDBInstances( + &rds.DescribeDBInstancesInput{ + DBInstanceIdentifier: aws.String(rs.Primary.ID), + }) - if err == nil { - if len(resp.DBInstances) != 0 && - *resp.DBInstances[0].DBInstanceIdentifier == rs.Primary.ID { - return fmt.Errorf("DB Cluster Instance %s still exists", rs.Primary.ID) - } - } + if err == nil { + if len(resp.DBInstances) != 0 && + *resp.DBInstances[0].DBInstanceIdentifier == rs.Primary.ID { + return fmt.Errorf("DB Cluster Instance %s still exists", rs.Primary.ID) + } + } - // Return nil if the Cluster Instance is already destroyed - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "DBInstanceNotFound" { - return nil - } - } + // Return nil if the Cluster Instance is already destroyed + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "DBInstanceNotFound" { + return nil + } + } - return err + return err - } + } - return nil + return nil } func testAccCheckAWSDBClusterInstanceAttributes(v *rds.DBInstance) resource.TestCheckFunc { - return func(s *terraform.State) error { + return func(s *terraform.State) error { - if *v.Engine != "aurora" { - return fmt.Errorf("bad engine, expected \"aurora\": %#v", *v.Engine) - } + if *v.Engine != "aurora" { + return fmt.Errorf("bad engine, expected \"aurora\": %#v", *v.Engine) + } - if !strings.HasPrefix(*v.DBClusterIdentifier, "tf-aurora-cluster") { - return fmt.Errorf("Bad Cluster Identifier prefix:\nexpected: %s\ngot: %s", "tf-aurora-cluster", *v.DBClusterIdentifier) - } + if !strings.HasPrefix(*v.DBClusterIdentifier, "tf-aurora-cluster") { + return fmt.Errorf("Bad Cluster Identifier prefix:\nexpected: %s\ngot: %s", "tf-aurora-cluster", *v.DBClusterIdentifier) + } - return nil - } + return nil + } } func testAccCheckAWSClusterInstanceExists(n string, v *rds.DBInstance) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := 
s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } - if rs.Primary.ID == "" { - return fmt.Errorf("No DB Instance ID is set") - } + if rs.Primary.ID == "" { + return fmt.Errorf("No DB Instance ID is set") + } - conn := testAccProvider.Meta().(*AWSClient).rdsconn - resp, err := conn.DescribeDBInstances(&rds.DescribeDBInstancesInput{ - DBInstanceIdentifier: aws.String(rs.Primary.ID), - }) + conn := testAccProvider.Meta().(*AWSClient).rdsconn + resp, err := conn.DescribeDBInstances(&rds.DescribeDBInstancesInput{ + DBInstanceIdentifier: aws.String(rs.Primary.ID), + }) - if err != nil { - return err - } + if err != nil { + return err + } - for _, d := range resp.DBInstances { - if *d.DBInstanceIdentifier == rs.Primary.ID { - *v = *d - return nil - } - } + for _, d := range resp.DBInstances { + if *d.DBInstanceIdentifier == rs.Primary.ID { + *v = *d + return nil + } + } - return fmt.Errorf("DB Cluster (%s) not found", rs.Primary.ID) - } + return fmt.Errorf("DB Cluster (%s) not found", rs.Primary.ID) + } } // Add some random to the name, to avoid collision diff --git a/builtin/providers/aws/resource_aws_rds_cluster_test.go b/builtin/providers/aws/resource_aws_rds_cluster_test.go index 2fd768949..ffa2fa8e9 100644 --- a/builtin/providers/aws/resource_aws_rds_cluster_test.go +++ b/builtin/providers/aws/resource_aws_rds_cluster_test.go @@ -1,100 +1,100 @@ package aws import ( - "fmt" - "math/rand" - "testing" - "time" + "fmt" + "math/rand" + "testing" + "time" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/rds" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/rds" ) func TestAccAWSRDSCluster_basic(t *testing.T) { - var v rds.DBCluster + var v rds.DBCluster - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSClusterDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSClusterConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSClusterExists("aws_rds_cluster.default", &v), - ), - }, - }, - }) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSClusterConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSClusterExists("aws_rds_cluster.default", &v), + ), + }, + }, + }) } func testAccCheckAWSClusterDestroy(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_rds_cluster" { - continue - } + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_rds_cluster" { + continue + } - // Try to find the Group - conn := testAccProvider.Meta().(*AWSClient).rdsconn - var err error - resp, err := conn.DescribeDBClusters( - &rds.DescribeDBClustersInput{ - DBClusterIdentifier: aws.String(rs.Primary.ID), - }) + // Try to find the Group + conn := testAccProvider.Meta().(*AWSClient).rdsconn + var err error + resp, err := conn.DescribeDBClusters( + &rds.DescribeDBClustersInput{ 
+ DBClusterIdentifier: aws.String(rs.Primary.ID), + }) - if err == nil { - if len(resp.DBClusters) != 0 && - *resp.DBClusters[0].DBClusterIdentifier == rs.Primary.ID { - return fmt.Errorf("DB Cluster %s still exists", rs.Primary.ID) - } - } + if err == nil { + if len(resp.DBClusters) != 0 && + *resp.DBClusters[0].DBClusterIdentifier == rs.Primary.ID { + return fmt.Errorf("DB Cluster %s still exists", rs.Primary.ID) + } + } - // Return nil if the cluster is already destroyed - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "DBClusterNotFound" { - return nil - } - } + // Return nil if the cluster is already destroyed + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "DBClusterNotFound" { + return nil + } + } - return err - } + return err + } - return nil + return nil } func testAccCheckAWSClusterExists(n string, v *rds.DBCluster) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } - if rs.Primary.ID == "" { - return fmt.Errorf("No DB Instance ID is set") - } + if rs.Primary.ID == "" { + return fmt.Errorf("No DB Instance ID is set") + } - conn := testAccProvider.Meta().(*AWSClient).rdsconn - resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{ - DBClusterIdentifier: aws.String(rs.Primary.ID), - }) + conn := testAccProvider.Meta().(*AWSClient).rdsconn + resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{ + DBClusterIdentifier: aws.String(rs.Primary.ID), + }) - if err != nil { - return err - } + if err != nil { + return err + } - for _, c := range resp.DBClusters { - if *c.DBClusterIdentifier == rs.Primary.ID { - *v = *c - return nil - } - } + for _, c := range resp.DBClusters { + if *c.DBClusterIdentifier == rs.Primary.ID { + *v = *c + return nil + } + } - return fmt.Errorf("DB Cluster (%s) not found", rs.Primary.ID) - } + return fmt.Errorf("DB Cluster (%s) not found", rs.Primary.ID) + } } // Add some random to the name, to avoid collision From 6b219a155386a77d23071dce02b29bc3e3632df4 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Wed, 7 Oct 2015 12:19:18 -0500 Subject: [PATCH 104/220] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7019444c0..8d95149fe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -52,6 +52,7 @@ BUG FIXES: * provider/aws: Read instance source_dest_check and save to state [GH-3152] * provider/aws: Allow `weight = 0` in Route53 records [GH-3196] * provider/aws: Normalize aws_elasticache_cluster id to lowercase, allowing convergence. [GH-3235] + * provider/aws: Fix ValidateAccountId for IAM Instance Profiles [GH-3313] * provider/openstack: add state 'downloading' to list of expected states in `blockstorage_volume_v1` creation [GH-2866] * provider/openstack: remove security groups (by name) before adding security From 2b75e65129a41a0723971ca0fc5b3fb4efb02043 Mon Sep 17 00:00:00 2001 From: Robert Roland Date: Wed, 7 Oct 2015 13:07:41 -0700 Subject: [PATCH 105/220] Update container.html.markdown Correcting a misspelling in the docs. 
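For context, the corrected `read_only` flag lives in the `volumes` block of
the `docker_container` resource. A minimal sketch of that block, based only on
the attributes documented on this page (the `container_path` name is assumed
here, since this hunk shows only its description):

    resource "docker_image" "ubuntu" {
      name = "ubuntu:latest"
    }

    resource "docker_container" "example" {
      name  = "example"
      image = "${docker_image.ubuntu.latest}"

      volumes {
        container_path = "/var/data" # path in the container where the volume is mounted
        host_path      = "/opt/data" # (Optional) path on the host the volume comes from
        read_only      = true        # (Optional) defaults to false
      }
    }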
--- website/source/docs/providers/docker/r/container.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/providers/docker/r/container.html.markdown b/website/source/docs/providers/docker/r/container.html.markdown index 5653f139a..fa940720d 100644 --- a/website/source/docs/providers/docker/r/container.html.markdown +++ b/website/source/docs/providers/docker/r/container.html.markdown @@ -76,7 +76,7 @@ the following: volume will be mounted. * `host_path` - (Optional, string) The path on the host where the volume is coming from. -* `read_only` - (Optinal, bool) If true, this volume will be readonly. +* `read_only` - (Optional, bool) If true, this volume will be readonly. Defaults to false. ## Attributes Reference From f9efede8524cec3f5a59090dc774c312f2516460 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 7 Oct 2015 13:35:06 -0700 Subject: [PATCH 106/220] gofmt files from recently merged PRs --- builtin/providers/aws/config.go | 2 +- builtin/providers/aws/conversions.go | 2 +- builtin/providers/aws/provider.go | 4 +- .../providers/aws/resource_aws_db_instance.go | 8 ++-- .../resource_aws_opsworks_ganglia_layer.go | 2 +- .../aws/resource_aws_opsworks_stack.go | 6 +-- builtin/providers/aws/structure.go | 38 +++++++++---------- builtin/providers/aws/tagsRDS.go | 28 +++++++------- .../azure/resource_azure_instance.go | 12 +++--- builtin/providers/google/metadata.go | 6 +-- .../google/resource_compute_instance.go | 8 ++-- .../resource_compute_project_metadata.go | 10 ++--- .../google/resource_compute_vpn_gateway.go | 4 +- .../google/resource_storage_bucket.go | 4 +- .../google/resource_storage_bucket_acl.go | 33 ++++++++-------- .../resource_storage_bucket_acl_test.go | 27 +++++++------ .../google/resource_storage_bucket_object.go | 7 ++-- .../resource_storage_bucket_object_test.go | 9 ++--- .../google/resource_storage_object_acl.go | 29 +++++++------- .../resource_storage_object_acl_test.go | 22 +++++------ ...source_openstack_blockstorage_volume_v1.go | 2 +- .../resource_openstack_compute_instance_v2.go | 1 - builtin/providers/rundeck/resource_job.go | 8 ++-- .../provisioners/chef/resource_provisioner.go | 1 - 24 files changed, 133 insertions(+), 140 deletions(-) diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go index 6e57fd6a3..a20405997 100644 --- a/builtin/providers/aws/config.go +++ b/builtin/providers/aws/config.go @@ -62,7 +62,7 @@ type AWSClient struct { kinesisconn *kinesis.Kinesis elasticacheconn *elasticache.ElastiCache lambdaconn *lambda.Lambda - opsworksconn *opsworks.OpsWorks + opsworksconn *opsworks.OpsWorks } // Client configures and returns a fully initialized AWSClient diff --git a/builtin/providers/aws/conversions.go b/builtin/providers/aws/conversions.go index 791123745..5c0caca70 100644 --- a/builtin/providers/aws/conversions.go +++ b/builtin/providers/aws/conversions.go @@ -5,7 +5,7 @@ import ( "github.com/hashicorp/terraform/helper/schema" ) -func makeAwsStringList(in []interface {}) []*string { +func makeAwsStringList(in []interface{}) []*string { ret := make([]*string, len(in), len(in)) for i := 0; i < len(in); i++ { ret[i] = aws.String(in[i].(string)) diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index cf89db0b7..c915c61fb 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -219,8 +219,8 @@ func Provider() terraform.ResourceProvider { "aws_opsworks_ganglia_layer": resourceAwsOpsworksGangliaLayer(), 
"aws_opsworks_custom_layer": resourceAwsOpsworksCustomLayer(), "aws_proxy_protocol_policy": resourceAwsProxyProtocolPolicy(), - "aws_rds_cluster": resourceAwsRDSCluster(), - "aws_rds_cluster_instance": resourceAwsRDSClusterInstance(), + "aws_rds_cluster": resourceAwsRDSCluster(), + "aws_rds_cluster_instance": resourceAwsRDSClusterInstance(), "aws_route53_delegation_set": resourceAwsRoute53DelegationSet(), "aws_route53_record": resourceAwsRoute53Record(), "aws_route53_zone_association": resourceAwsRoute53ZoneAssociation(), diff --git a/builtin/providers/aws/resource_aws_db_instance.go b/builtin/providers/aws/resource_aws_db_instance.go index 60b3dd329..d1d3a4ae1 100644 --- a/builtin/providers/aws/resource_aws_db_instance.go +++ b/builtin/providers/aws/resource_aws_db_instance.go @@ -75,10 +75,10 @@ func resourceAwsDbInstance() *schema.Resource { }, "identifier": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateRdsId, + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateRdsId, }, "instance_class": &schema.Schema{ diff --git a/builtin/providers/aws/resource_aws_opsworks_ganglia_layer.go b/builtin/providers/aws/resource_aws_opsworks_ganglia_layer.go index c37bb70e5..24778501c 100644 --- a/builtin/providers/aws/resource_aws_opsworks_ganglia_layer.go +++ b/builtin/providers/aws/resource_aws_opsworks_ganglia_layer.go @@ -6,7 +6,7 @@ import ( func resourceAwsOpsworksGangliaLayer() *schema.Resource { layerType := &opsworksLayerType{ - TypeName: "monitoring-master", + TypeName: "monitoring-master", DefaultLayerName: "Ganglia", Attributes: map[string]*opsworksLayerTypeAttribute{ diff --git a/builtin/providers/aws/resource_aws_opsworks_stack.go b/builtin/providers/aws/resource_aws_opsworks_stack.go index 5d0bf1aa8..8eeda3f05 100644 --- a/builtin/providers/aws/resource_aws_opsworks_stack.go +++ b/builtin/providers/aws/resource_aws_opsworks_stack.go @@ -306,9 +306,9 @@ func resourceAwsOpsworksStackCreate(d *schema.ResourceData, meta interface{}) er req := &opsworks.CreateStackInput{ DefaultInstanceProfileArn: aws.String(d.Get("default_instance_profile_arn").(string)), - Name: aws.String(d.Get("name").(string)), - Region: aws.String(d.Get("region").(string)), - ServiceRoleArn: aws.String(d.Get("service_role_arn").(string)), + Name: aws.String(d.Get("name").(string)), + Region: aws.String(d.Get("region").(string)), + ServiceRoleArn: aws.String(d.Get("service_role_arn").(string)), } inVpc := false if vpcId, ok := d.GetOk("vpc_id"); ok { diff --git a/builtin/providers/aws/structure.go b/builtin/providers/aws/structure.go index d8dd6af65..dc7b6d89b 100644 --- a/builtin/providers/aws/structure.go +++ b/builtin/providers/aws/structure.go @@ -4,7 +4,7 @@ import ( "bytes" "encoding/json" "fmt" - "regexp" + "regexp" "sort" "strings" @@ -460,22 +460,22 @@ func expandResourceRecords(recs []interface{}, typeStr string) []*route53.Resour } func validateRdsId(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only lowercase alphanumeric characters and hyphens allowed in %q", k)) - } - if !regexp.MustCompile(`^[a-z]`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "first character of %q must be a letter", k)) - } - if regexp.MustCompile(`--`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q cannot contain two consecutive hyphens", k)) - } - if 
regexp.MustCompile(`-$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q cannot end with a hyphen", k)) - } - return + value := v.(string) + if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "only lowercase alphanumeric characters and hyphens allowed in %q", k)) + } + if !regexp.MustCompile(`^[a-z]`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "first character of %q must be a letter", k)) + } + if regexp.MustCompile(`--`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q cannot contain two consecutive hyphens", k)) + } + if regexp.MustCompile(`-$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q cannot end with a hyphen", k)) + } + return } diff --git a/builtin/providers/aws/tagsRDS.go b/builtin/providers/aws/tagsRDS.go index 7ba0ee903..bcc3eb9ea 100644 --- a/builtin/providers/aws/tagsRDS.go +++ b/builtin/providers/aws/tagsRDS.go @@ -1,7 +1,7 @@ package aws import ( - "fmt" + "fmt" "log" "github.com/aws/aws-sdk-go/aws" @@ -20,7 +20,7 @@ func setTagsRDS(conn *rds.RDS, d *schema.ResourceData, arn string) error { // Set tags if len(remove) > 0 { - log.Printf("[DEBUG] Removing tags: %s", remove) + log.Printf("[DEBUG] Removing tags: %s", remove) k := make([]*string, len(remove), len(remove)) for i, t := range remove { k[i] = t.Key @@ -35,7 +35,7 @@ func setTagsRDS(conn *rds.RDS, d *schema.ResourceData, arn string) error { } } if len(create) > 0 { - log.Printf("[DEBUG] Creating tags: %s", create) + log.Printf("[DEBUG] Creating tags: %s", create) _, err := conn.AddTagsToResource(&rds.AddTagsToResourceInput{ ResourceName: aws.String(arn), Tags: create, @@ -96,18 +96,18 @@ func tagsToMapRDS(ts []*rds.Tag) map[string]string { } func saveTagsRDS(conn *rds.RDS, d *schema.ResourceData, arn string) error { - resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{ - ResourceName: aws.String(arn), - }) + resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{ + ResourceName: aws.String(arn), + }) - if err != nil { - return fmt.Errorf("[DEBUG] Error retreiving tags for ARN: %s", arn) - } + if err != nil { + return fmt.Errorf("[DEBUG] Error retreiving tags for ARN: %s", arn) + } - var dt []*rds.Tag - if len(resp.TagList) > 0 { - dt = resp.TagList - } + var dt []*rds.Tag + if len(resp.TagList) > 0 { + dt = resp.TagList + } - return d.Set("tags", tagsToMapRDS(dt)) + return d.Set("tags", tagsToMapRDS(dt)) } diff --git a/builtin/providers/azure/resource_azure_instance.go b/builtin/providers/azure/resource_azure_instance.go index fb264f28e..c95285ec2 100644 --- a/builtin/providers/azure/resource_azure_instance.go +++ b/builtin/providers/azure/resource_azure_instance.go @@ -297,15 +297,15 @@ func resourceAzureInstanceCreate(d *schema.ResourceData, meta interface{}) (err if err != nil { return fmt.Errorf("Error configuring %s for Windows: %s", name, err) } - + if domain_name, ok := d.GetOk("domain_name"); ok { err = vmutils.ConfigureWindowsToJoinDomain( - &role, - d.Get("domain_username").(string), - d.Get("domain_password").(string), - domain_name.(string), + &role, + d.Get("domain_username").(string), + d.Get("domain_password").(string), + domain_name.(string), d.Get("domain_ou").(string), - ) + ) if err != nil { return fmt.Errorf("Error configuring %s for WindowsToJoinDomain: %s", name, err) } diff --git a/builtin/providers/google/metadata.go b/builtin/providers/google/metadata.go index bc609ac88..e75c45022 100644 --- a/builtin/providers/google/metadata.go +++ 
b/builtin/providers/google/metadata.go @@ -23,7 +23,7 @@ func MetadataRetryWrapper(update func() error) error { } } - return fmt.Errorf("Failed to update metadata after %d retries", attempt); + return fmt.Errorf("Failed to update metadata after %d retries", attempt) } // Update the metadata (serverMD) according to the provided diff (oldMDMap v @@ -51,7 +51,7 @@ func MetadataUpdate(oldMDMap map[string]interface{}, newMDMap map[string]interfa // Reformat old metadata into a list serverMD.Items = nil for key, val := range curMDMap { - v := val; + v := val serverMD.Items = append(serverMD.Items, &compute.MetadataItems{ Key: key, Value: &v, @@ -60,7 +60,7 @@ func MetadataUpdate(oldMDMap map[string]interface{}, newMDMap map[string]interfa } // Format metadata from the server data format -> schema data format -func MetadataFormatSchema(md *compute.Metadata) (map[string]interface{}) { +func MetadataFormatSchema(md *compute.Metadata) map[string]interface{} { newMD := make(map[string]interface{}) for _, kv := range md.Items { diff --git a/builtin/providers/google/resource_compute_instance.go b/builtin/providers/google/resource_compute_instance.go index 987964641..52575767e 100644 --- a/builtin/providers/google/resource_compute_instance.go +++ b/builtin/providers/google/resource_compute_instance.go @@ -507,12 +507,12 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - instance, err := getInstance(config, d); + instance, err := getInstance(config, d) if err != nil { return err } - // Synch metadata + // Synch metadata md := instance.Metadata if err = d.Set("metadata", MetadataFormatSchema(md)); err != nil { @@ -644,7 +644,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err zone := d.Get("zone").(string) - instance, err := getInstance(config, d); + instance, err := getInstance(config, d) if err != nil { return err } @@ -658,7 +658,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err updateMD := func() error { // Reload the instance in the case of a fingerprint mismatch - instance, err = getInstance(config, d); + instance, err = getInstance(config, d) if err != nil { return err } diff --git a/builtin/providers/google/resource_compute_project_metadata.go b/builtin/providers/google/resource_compute_project_metadata.go index 83b6fb0df..c2f8a4a5f 100644 --- a/builtin/providers/google/resource_compute_project_metadata.go +++ b/builtin/providers/google/resource_compute_project_metadata.go @@ -72,10 +72,10 @@ func resourceComputeProjectMetadataCreate(d *schema.ResourceData, meta interface err := MetadataRetryWrapper(createMD) if err != nil { - return err; + return err } - return resourceComputeProjectMetadataRead(d, meta); + return resourceComputeProjectMetadataRead(d, meta) } func resourceComputeProjectMetadataRead(d *schema.ResourceData, meta interface{}) error { @@ -115,7 +115,7 @@ func resourceComputeProjectMetadataUpdate(d *schema.ResourceData, meta interface md := project.CommonInstanceMetadata - MetadataUpdate(o.(map[string]interface{}), n.(map[string]interface{}), md) + MetadataUpdate(o.(map[string]interface{}), n.(map[string]interface{}), md) op, err := config.clientCompute.Projects.SetCommonInstanceMetadata(config.Project, md).Do() @@ -133,10 +133,10 @@ func resourceComputeProjectMetadataUpdate(d *schema.ResourceData, meta interface err := MetadataRetryWrapper(updateMD) if err != nil { - return 
err; + return err } - return resourceComputeProjectMetadataRead(d, meta); + return resourceComputeProjectMetadataRead(d, meta) } return nil diff --git a/builtin/providers/google/resource_compute_vpn_gateway.go b/builtin/providers/google/resource_compute_vpn_gateway.go index ba25aeb1f..bd5350b9c 100644 --- a/builtin/providers/google/resource_compute_vpn_gateway.go +++ b/builtin/providers/google/resource_compute_vpn_gateway.go @@ -56,8 +56,8 @@ func resourceComputeVpnGatewayCreate(d *schema.ResourceData, meta interface{}) e vpnGatewaysService := compute.NewTargetVpnGatewaysService(config.clientCompute) vpnGateway := &compute.TargetVpnGateway{ - Name: name, - Network: network, + Name: name, + Network: network, } if v, ok := d.GetOk("description"); ok { diff --git a/builtin/providers/google/resource_storage_bucket.go b/builtin/providers/google/resource_storage_bucket.go index 64e4fd434..9118119a8 100644 --- a/builtin/providers/google/resource_storage_bucket.go +++ b/builtin/providers/google/resource_storage_bucket.go @@ -128,8 +128,8 @@ func resourceStorageBucketUpdate(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("At most one website block is allowed") } - // Setting fields to "" to be explicit that the PATCH call will - // delete this field. + // Setting fields to "" to be explicit that the PATCH call will + // delete this field. if len(websites) == 0 { sb.Website.NotFoundPage = "" sb.Website.MainPageSuffix = "" diff --git a/builtin/providers/google/resource_storage_bucket_acl.go b/builtin/providers/google/resource_storage_bucket_acl.go index 1c2ef2ab6..3b866e0ad 100644 --- a/builtin/providers/google/resource_storage_bucket_acl.go +++ b/builtin/providers/google/resource_storage_bucket_acl.go @@ -24,9 +24,9 @@ func resourceStorageBucketAcl() *schema.Resource { ForceNew: true, }, "predefined_acl": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, }, "role_entity": &schema.Schema{ Type: schema.TypeList, @@ -83,7 +83,7 @@ func resourceStorageBucketAclCreate(d *schema.ResourceData, meta interface{}) er if len(predefined_acl) > 0 { if len(role_entity) > 0 { return fmt.Errorf("Error, you cannot specify both " + - "\"predefined_acl\" and \"role_entity\""); + "\"predefined_acl\" and \"role_entity\"") } res, err := config.clientStorage.Buckets.Get(bucket).Do() @@ -99,9 +99,9 @@ func resourceStorageBucketAclCreate(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Error updating bucket %s: %v", bucket, err) } - return resourceStorageBucketAclRead(d, meta); + return resourceStorageBucketAclRead(d, meta) } else if len(role_entity) > 0 { - for _, v := range(role_entity) { + for _, v := range role_entity { pair, err := getRoleEntityPair(v.(string)) bucketAccessControl := &storage.BucketAccessControl{ @@ -118,7 +118,7 @@ func resourceStorageBucketAclCreate(d *schema.ResourceData, meta interface{}) er } } - return resourceStorageBucketAclRead(d, meta); + return resourceStorageBucketAclRead(d, meta) } if len(default_acl) > 0 { @@ -135,13 +135,12 @@ func resourceStorageBucketAclCreate(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Error updating bucket %s: %v", bucket, err) } - return resourceStorageBucketAclRead(d, meta); + return resourceStorageBucketAclRead(d, meta) } return nil } - func resourceStorageBucketAclRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -153,7 +152,7 @@ func resourceStorageBucketAclRead(d *schema.ResourceData, 
meta interface{}) erro role_entity := make([]interface{}, 0) re_local := d.Get("role_entity").([]interface{}) re_local_map := make(map[string]string) - for _, v := range(re_local) { + for _, v := range re_local { res, err := getRoleEntityPair(v.(string)) if err != nil { @@ -170,7 +169,7 @@ func resourceStorageBucketAclRead(d *schema.ResourceData, meta interface{}) erro return err } - for _, v := range(res.Items) { + for _, v := range res.Items { log.Printf("[DEBUG]: examining re %s-%s", v.Role, v.Entity) // We only store updates to the locally defined access controls if _, in := re_local_map[v.Entity]; in { @@ -196,7 +195,7 @@ func resourceStorageBucketAclUpdate(d *schema.ResourceData, meta interface{}) er old_re, new_re := o.([]interface{}), n.([]interface{}) old_re_map := make(map[string]string) - for _, v := range(old_re) { + for _, v := range old_re { res, err := getRoleEntityPair(v.(string)) if err != nil { @@ -207,7 +206,7 @@ func resourceStorageBucketAclUpdate(d *schema.ResourceData, meta interface{}) er old_re_map[res.Entity] = res.Role } - for _, v := range(new_re) { + for _, v := range new_re { pair, err := getRoleEntityPair(v.(string)) bucketAccessControl := &storage.BucketAccessControl{ @@ -233,7 +232,7 @@ func resourceStorageBucketAclUpdate(d *schema.ResourceData, meta interface{}) er } } - for entity, _ := range(old_re_map) { + for entity, _ := range old_re_map { log.Printf("[DEBUG]: removing entity %s", entity) err := config.clientStorage.BucketAccessControls.Delete(bucket, entity).Do() @@ -242,7 +241,7 @@ func resourceStorageBucketAclUpdate(d *schema.ResourceData, meta interface{}) er } } - return resourceStorageBucketAclRead(d, meta); + return resourceStorageBucketAclRead(d, meta) } if d.HasChange("default_acl") { @@ -261,7 +260,7 @@ func resourceStorageBucketAclUpdate(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Error updating bucket %s: %v", bucket, err) } - return resourceStorageBucketAclRead(d, meta); + return resourceStorageBucketAclRead(d, meta) } return nil @@ -273,7 +272,7 @@ func resourceStorageBucketAclDelete(d *schema.ResourceData, meta interface{}) er bucket := d.Get("bucket").(string) re_local := d.Get("role_entity").([]interface{}) - for _, v := range(re_local) { + for _, v := range re_local { res, err := getRoleEntityPair(v.(string)) if err != nil { return err diff --git a/builtin/providers/google/resource_storage_bucket_acl_test.go b/builtin/providers/google/resource_storage_bucket_acl_test.go index afcb991c5..9cdc2b173 100644 --- a/builtin/providers/google/resource_storage_bucket_acl_test.go +++ b/builtin/providers/google/resource_storage_bucket_acl_test.go @@ -2,8 +2,8 @@ package google import ( "fmt" - "testing" "math/rand" + "testing" "time" "github.com/hashicorp/terraform/helper/resource" @@ -24,13 +24,13 @@ var testAclBucketName = fmt.Sprintf("%s-%d", "tf-test-acl-bucket", rand.New(rand func TestAccGoogleStorageBucketAcl_basic(t *testing.T) { resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccGoogleStorageBucketAclDestroy, Steps: []resource.TestStep{ resource.TestStep{ Config: testGoogleStorageBucketsAclBasic1, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeTestCheckFunc( testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic1), testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic2), ), @@ -41,13 +41,13 @@ func TestAccGoogleStorageBucketAcl_basic(t *testing.T) { 
func TestAccGoogleStorageBucketAcl_upgrade(t *testing.T) { resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccGoogleStorageBucketAclDestroy, Steps: []resource.TestStep{ resource.TestStep{ Config: testGoogleStorageBucketsAclBasic1, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeTestCheckFunc( testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic1), testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic2), ), @@ -55,7 +55,7 @@ func TestAccGoogleStorageBucketAcl_upgrade(t *testing.T) { resource.TestStep{ Config: testGoogleStorageBucketsAclBasic2, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeTestCheckFunc( testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic2), testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic3_owner), ), @@ -63,7 +63,7 @@ func TestAccGoogleStorageBucketAcl_upgrade(t *testing.T) { resource.TestStep{ Config: testGoogleStorageBucketsAclBasicDelete, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeTestCheckFunc( testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic1), testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic2), testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic3_owner), @@ -75,13 +75,13 @@ func TestAccGoogleStorageBucketAcl_upgrade(t *testing.T) { func TestAccGoogleStorageBucketAcl_downgrade(t *testing.T) { resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccGoogleStorageBucketAclDestroy, Steps: []resource.TestStep{ resource.TestStep{ Config: testGoogleStorageBucketsAclBasic2, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeTestCheckFunc( testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic2), testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic3_owner), ), @@ -89,7 +89,7 @@ func TestAccGoogleStorageBucketAcl_downgrade(t *testing.T) { resource.TestStep{ Config: testGoogleStorageBucketsAclBasic3, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeTestCheckFunc( testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic2), testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic3_reader), ), @@ -97,7 +97,7 @@ func TestAccGoogleStorageBucketAcl_downgrade(t *testing.T) { resource.TestStep{ Config: testGoogleStorageBucketsAclBasicDelete, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeTestCheckFunc( testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic1), testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic2), testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic3_owner), @@ -109,7 +109,7 @@ func TestAccGoogleStorageBucketAcl_downgrade(t *testing.T) { func TestAccGoogleStorageBucketAcl_predefined(t *testing.T) { resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccGoogleStorageBucketAclDestroy, Steps: []resource.TestStep{ @@ -146,7 +146,7 @@ func testAccCheckGoogleStorageBucketAcl(bucket, roleEntityS string) resource.Tes return fmt.Errorf("Error retrieving contents of acl for bucket %s: %s", bucket, err) } - if (res.Role != roleEntity.Role) { + if res.Role != 
roleEntity.Role { return fmt.Errorf("Error, Role mismatch %s != %s", res.Role, roleEntity.Role) } @@ -218,7 +218,6 @@ resource "google_storage_bucket_acl" "acl" { } `, testAclBucketName, roleEntityBasic2, roleEntityBasic3_reader) - var testGoogleStorageBucketsAclPredefined = fmt.Sprintf(` resource "google_storage_bucket" "bucket" { name = "%s" diff --git a/builtin/providers/google/resource_storage_bucket_object.go b/builtin/providers/google/resource_storage_bucket_object.go index 473349d3c..231153a85 100644 --- a/builtin/providers/google/resource_storage_bucket_object.go +++ b/builtin/providers/google/resource_storage_bucket_object.go @@ -32,10 +32,10 @@ func resourceStorageBucketObject() *schema.Resource { ForceNew: true, }, "predefined_acl": &schema.Schema{ - Type: schema.TypeString, + Type: schema.TypeString, Deprecated: "Please use resource \"storage_object_acl.predefined_acl\" instead.", - Optional: true, - ForceNew: true, + Optional: true, + ForceNew: true, }, "md5hash": &schema.Schema{ Type: schema.TypeString, @@ -75,7 +75,6 @@ func resourceStorageBucketObjectCreate(d *schema.ResourceData, meta interface{}) insertCall.PredefinedAcl(v.(string)) } - _, err = insertCall.Do() if err != nil { diff --git a/builtin/providers/google/resource_storage_bucket_object_test.go b/builtin/providers/google/resource_storage_bucket_object_test.go index d7be902a1..e84822fdd 100644 --- a/builtin/providers/google/resource_storage_bucket_object_test.go +++ b/builtin/providers/google/resource_storage_bucket_object_test.go @@ -1,11 +1,11 @@ package google import ( - "fmt" - "testing" - "io/ioutil" "crypto/md5" "encoding/base64" + "fmt" + "io/ioutil" + "testing" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" @@ -48,7 +48,6 @@ func testAccCheckGoogleStorageObject(bucket, object, md5 string) resource.TestCh objectsService := storage.NewObjectsService(config.clientStorage) - getCall := objectsService.Get(bucket, object) res, err := getCall.Do() @@ -56,7 +55,7 @@ func testAccCheckGoogleStorageObject(bucket, object, md5 string) resource.TestCh return fmt.Errorf("Error retrieving contents of object %s: %s", object, err) } - if (md5 != res.Md5Hash) { + if md5 != res.Md5Hash { return fmt.Errorf("Error contents of %s garbled, md5 hashes don't match (%s, %s)", object, md5, res.Md5Hash) } diff --git a/builtin/providers/google/resource_storage_object_acl.go b/builtin/providers/google/resource_storage_object_acl.go index 867453284..5212f81db 100644 --- a/builtin/providers/google/resource_storage_object_acl.go +++ b/builtin/providers/google/resource_storage_object_acl.go @@ -65,7 +65,7 @@ func resourceStorageObjectAclCreate(d *schema.ResourceData, meta interface{}) er if len(predefined_acl) > 0 { if len(role_entity) > 0 { return fmt.Errorf("Error, you cannot specify both " + - "\"predefined_acl\" and \"role_entity\""); + "\"predefined_acl\" and \"role_entity\"") } res, err := config.clientStorage.Objects.Get(bucket, object).Do() @@ -74,16 +74,16 @@ func resourceStorageObjectAclCreate(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Error reading object %s: %v", bucket, err) } - res, err = config.clientStorage.Objects.Update(bucket,object, + res, err = config.clientStorage.Objects.Update(bucket, object, res).PredefinedAcl(predefined_acl).Do() if err != nil { return fmt.Errorf("Error updating object %s: %v", bucket, err) } - return resourceStorageBucketAclRead(d, meta); + return resourceStorageBucketAclRead(d, meta) } else if len(role_entity) > 0 { - for _, v := 
range(role_entity) { + for _, v := range role_entity { pair, err := getRoleEntityPair(v.(string)) objectAccessControl := &storage.ObjectAccessControl{ @@ -101,14 +101,13 @@ func resourceStorageObjectAclCreate(d *schema.ResourceData, meta interface{}) er } } - return resourceStorageObjectAclRead(d, meta); + return resourceStorageObjectAclRead(d, meta) } return fmt.Errorf("Error, you must specify either " + - "\"predefined_acl\" or \"role_entity\""); + "\"predefined_acl\" or \"role_entity\"") } - func resourceStorageObjectAclRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -121,7 +120,7 @@ func resourceStorageObjectAclRead(d *schema.ResourceData, meta interface{}) erro role_entity := make([]interface{}, 0) re_local := d.Get("role_entity").([]interface{}) re_local_map := make(map[string]string) - for _, v := range(re_local) { + for _, v := range re_local { res, err := getRoleEntityPair(v.(string)) if err != nil { @@ -138,10 +137,10 @@ func resourceStorageObjectAclRead(d *schema.ResourceData, meta interface{}) erro return err } - for _, v := range(res.Items) { + for _, v := range res.Items { role := "" entity := "" - for key, val := range (v.(map[string]interface{})) { + for key, val := range v.(map[string]interface{}) { if key == "role" { role = val.(string) } else if key == "entity" { @@ -172,7 +171,7 @@ func resourceStorageObjectAclUpdate(d *schema.ResourceData, meta interface{}) er old_re, new_re := o.([]interface{}), n.([]interface{}) old_re_map := make(map[string]string) - for _, v := range(old_re) { + for _, v := range old_re { res, err := getRoleEntityPair(v.(string)) if err != nil { @@ -183,7 +182,7 @@ func resourceStorageObjectAclUpdate(d *schema.ResourceData, meta interface{}) er old_re_map[res.Entity] = res.Role } - for _, v := range(new_re) { + for _, v := range new_re { pair, err := getRoleEntityPair(v.(string)) objectAccessControl := &storage.ObjectAccessControl{ @@ -209,7 +208,7 @@ func resourceStorageObjectAclUpdate(d *schema.ResourceData, meta interface{}) er } } - for entity, _ := range(old_re_map) { + for entity, _ := range old_re_map { log.Printf("[DEBUG]: removing entity %s", entity) err := config.clientStorage.ObjectAccessControls.Delete(bucket, object, entity).Do() @@ -218,7 +217,7 @@ func resourceStorageObjectAclUpdate(d *schema.ResourceData, meta interface{}) er } } - return resourceStorageObjectAclRead(d, meta); + return resourceStorageObjectAclRead(d, meta) } return nil @@ -231,7 +230,7 @@ func resourceStorageObjectAclDelete(d *schema.ResourceData, meta interface{}) er object := d.Get("object").(string) re_local := d.Get("role_entity").([]interface{}) - for _, v := range(re_local) { + for _, v := range re_local { res, err := getRoleEntityPair(v.(string)) if err != nil { return err diff --git a/builtin/providers/google/resource_storage_object_acl_test.go b/builtin/providers/google/resource_storage_object_acl_test.go index f0154aca6..ff14f683c 100644 --- a/builtin/providers/google/resource_storage_object_acl_test.go +++ b/builtin/providers/google/resource_storage_object_acl_test.go @@ -2,9 +2,9 @@ package google import ( "fmt" - "testing" - "math/rand" "io/ioutil" + "math/rand" + "testing" "time" "github.com/hashicorp/terraform/helper/resource" @@ -32,7 +32,7 @@ func TestAccGoogleStorageObjectAcl_basic(t *testing.T) { Steps: []resource.TestStep{ resource.TestStep{ Config: testGoogleStorageObjectsAclBasic1, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeTestCheckFunc( 
testAccCheckGoogleStorageObjectAcl(testAclBucketName, testAclObjectName, roleEntityBasic1), testAccCheckGoogleStorageObjectAcl(testAclBucketName, @@ -58,7 +58,7 @@ func TestAccGoogleStorageObjectAcl_upgrade(t *testing.T) { Steps: []resource.TestStep{ resource.TestStep{ Config: testGoogleStorageObjectsAclBasic1, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeTestCheckFunc( testAccCheckGoogleStorageObjectAcl(testAclBucketName, testAclObjectName, roleEntityBasic1), testAccCheckGoogleStorageObjectAcl(testAclBucketName, @@ -68,7 +68,7 @@ func TestAccGoogleStorageObjectAcl_upgrade(t *testing.T) { resource.TestStep{ Config: testGoogleStorageObjectsAclBasic2, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeTestCheckFunc( testAccCheckGoogleStorageObjectAcl(testAclBucketName, testAclObjectName, roleEntityBasic2), testAccCheckGoogleStorageObjectAcl(testAclBucketName, @@ -78,7 +78,7 @@ func TestAccGoogleStorageObjectAcl_upgrade(t *testing.T) { resource.TestStep{ Config: testGoogleStorageObjectsAclBasicDelete, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeTestCheckFunc( testAccCheckGoogleStorageObjectAclDelete(testAclBucketName, testAclObjectName, roleEntityBasic1), testAccCheckGoogleStorageObjectAclDelete(testAclBucketName, @@ -106,7 +106,7 @@ func TestAccGoogleStorageObjectAcl_downgrade(t *testing.T) { Steps: []resource.TestStep{ resource.TestStep{ Config: testGoogleStorageObjectsAclBasic2, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeTestCheckFunc( testAccCheckGoogleStorageObjectAcl(testAclBucketName, testAclObjectName, roleEntityBasic2), testAccCheckGoogleStorageObjectAcl(testAclBucketName, @@ -116,7 +116,7 @@ func TestAccGoogleStorageObjectAcl_downgrade(t *testing.T) { resource.TestStep{ Config: testGoogleStorageObjectsAclBasic3, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeTestCheckFunc( testAccCheckGoogleStorageObjectAcl(testAclBucketName, testAclObjectName, roleEntityBasic2), testAccCheckGoogleStorageObjectAcl(testAclBucketName, @@ -126,7 +126,7 @@ func TestAccGoogleStorageObjectAcl_downgrade(t *testing.T) { resource.TestStep{ Config: testGoogleStorageObjectsAclBasicDelete, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeTestCheckFunc( testAccCheckGoogleStorageObjectAclDelete(testAclBucketName, testAclObjectName, roleEntityBasic1), testAccCheckGoogleStorageObjectAclDelete(testAclBucketName, @@ -171,7 +171,7 @@ func testAccCheckGoogleStorageObjectAcl(bucket, object, roleEntityS string) reso return fmt.Errorf("Error retrieving contents of acl for bucket %s: %s", bucket, err) } - if (res.Role != roleEntity.Role) { + if res.Role != roleEntity.Role { return fmt.Errorf("Error, Role mismatch %s != %s", res.Role, roleEntity.Role) } @@ -289,7 +289,7 @@ resource "google_storage_object_acl" "acl" { role_entity = ["%s", "%s"] } `, testAclBucketName, testAclObjectName, tfObjectAcl.Name(), - roleEntityBasic2, roleEntityBasic3_reader) + roleEntityBasic2, roleEntityBasic3_reader) var testGoogleStorageObjectsAclPredefined = fmt.Sprintf(` resource "google_storage_bucket" "bucket" { diff --git a/builtin/providers/openstack/resource_openstack_blockstorage_volume_v1.go b/builtin/providers/openstack/resource_openstack_blockstorage_volume_v1.go index cd5a5d567..e049269a9 100644 --- a/builtin/providers/openstack/resource_openstack_blockstorage_volume_v1.go +++ b/builtin/providers/openstack/resource_openstack_blockstorage_volume_v1.go @@ -136,7 +136,7 @@ func resourceBlockStorageVolumeV1Create(d 
*schema.ResourceData, meta interface{} v.ID) stateConf := &resource.StateChangeConf{ - Pending: []string{"downloading"}, + Pending: []string{"downloading"}, Target: "available", Refresh: VolumeV1StateRefreshFunc(blockStorageClient, v.ID), Timeout: 10 * time.Minute, diff --git a/builtin/providers/openstack/resource_openstack_compute_instance_v2.go b/builtin/providers/openstack/resource_openstack_compute_instance_v2.go index 75014cc75..3101f41bc 100644 --- a/builtin/providers/openstack/resource_openstack_compute_instance_v2.go +++ b/builtin/providers/openstack/resource_openstack_compute_instance_v2.go @@ -610,7 +610,6 @@ func resourceComputeInstanceV2Update(d *schema.ResourceData, meta interface{}) e log.Printf("[DEBUG] Security groups to remove: %v", secgroupsToRemove) - for _, g := range secgroupsToRemove.List() { err := secgroups.RemoveServerFromGroup(computeClient, d.Id(), g.(string)).ExtractErr() if err != nil { diff --git a/builtin/providers/rundeck/resource_job.go b/builtin/providers/rundeck/resource_job.go index 7411d5746..c9af25b0b 100644 --- a/builtin/providers/rundeck/resource_job.go +++ b/builtin/providers/rundeck/resource_job.go @@ -340,10 +340,10 @@ func jobFromResourceData(d *schema.ResourceData) (*rundeck.JobDetail, error) { LogLevel: d.Get("log_level").(string), AllowConcurrentExecutions: d.Get("allow_concurrent_executions").(bool), Dispatch: &rundeck.JobDispatch{ - MaxThreadCount: d.Get("max_thread_count").(int), - ContinueOnError: d.Get("continue_on_error").(bool), - RankAttribute: d.Get("rank_attribute").(string), - RankOrder: d.Get("rank_order").(string), + MaxThreadCount: d.Get("max_thread_count").(int), + ContinueOnError: d.Get("continue_on_error").(bool), + RankAttribute: d.Get("rank_attribute").(string), + RankOrder: d.Get("rank_order").(string), }, } diff --git a/builtin/provisioners/chef/resource_provisioner.go b/builtin/provisioners/chef/resource_provisioner.go index d7dbd718d..7b94486d2 100644 --- a/builtin/provisioners/chef/resource_provisioner.go +++ b/builtin/provisioners/chef/resource_provisioner.go @@ -326,7 +326,6 @@ func (p *Provisioner) runChefClientFunc( cmd = fmt.Sprintf("%s -j %q -E %q", chefCmd, fb, p.Environment) } - if p.LogToFile { if err := os.MkdirAll(logfileDir, 0755); err != nil { return fmt.Errorf("Error creating logfile directory %s: %v", logfileDir, err) From 05a091f892be4a786a1026dc0f61bed8c5ae8350 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Wed, 7 Oct 2015 15:58:05 -0500 Subject: [PATCH 107/220] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8d95149fe..419695b7e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ FEATURES: * **New provider: `rundeck`** [GH-2412] + * **New provider: `packet`** [GH-2260] * **New resource: `cloudstack_loadbalancer_rule`** [GH-2934] * **New resource: `google_compute_project_metadata`** [GH-3065] * **New resources: `aws_ami`, `aws_ami_copy`, `aws_ami_from_instance`** [GH-2784] From d089ac8e8276067b35d823e66bd9202331217eef Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Wed, 7 Oct 2015 18:18:26 -0500 Subject: [PATCH 108/220] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 419695b7e..5b8a19045 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -54,6 +54,7 @@ BUG FIXES: * provider/aws: Allow `weight = 0` in Route53 records [GH-3196] * provider/aws: Normalize aws_elasticache_cluster id to lowercase, allowing convergence. 
[GH-3235] * provider/aws: Fix ValidateAccountId for IAM Instance Profiles [GH-3313] + * provider/docker: Fix issue preventing private images from being referenced [GH-2619] * provider/openstack: add state 'downloading' to list of expected states in `blockstorage_volume_v1` creation [GH-2866] * provider/openstack: remove security groups (by name) before adding security From 2b9f4f895eae1d58972b106ebafeb5515fc2603f Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Thu, 1 Oct 2015 15:12:46 -0700 Subject: [PATCH 109/220] provider/aws: Add support for aws_elasticsearch_domain --- builtin/providers/aws/config.go | 5 + builtin/providers/aws/provider.go | 1 + .../aws/resource_aws_elasticsearch_domain.go | 399 ++++++++++++++++++ builtin/providers/aws/structure.go | 111 +++++ 4 files changed, 516 insertions(+) create mode 100644 builtin/providers/aws/resource_aws_elasticsearch_domain.go diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go index a20405997..badc3e20e 100644 --- a/builtin/providers/aws/config.go +++ b/builtin/providers/aws/config.go @@ -18,6 +18,7 @@ import ( "github.com/aws/aws-sdk-go/service/ecs" "github.com/aws/aws-sdk-go/service/efs" "github.com/aws/aws-sdk-go/service/elasticache" + elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice" "github.com/aws/aws-sdk-go/service/elb" "github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/kinesis" @@ -51,6 +52,7 @@ type AWSClient struct { ecsconn *ecs.ECS efsconn *efs.EFS elbconn *elb.ELB + esconn *elasticsearch.ElasticsearchService autoscalingconn *autoscaling.AutoScaling s3conn *s3.S3 sqsconn *sqs.SQS @@ -157,6 +159,9 @@ func (c *Config) Client() (interface{}, error) { log.Println("[INFO] Initializing EFS Connection") client.efsconn = efs.New(awsConfig) + log.Println("[INFO] Initializing ElasticSearch Connection") + client.esconn = elasticsearch.New(awsConfig) + log.Println("[INFO] Initializing Route 53 connection") client.r53conn = route53.New(usEast1AwsConfig) diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index c915c61fb..42b1d6242 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -182,6 +182,7 @@ func Provider() terraform.ResourceProvider { "aws_elasticache_parameter_group": resourceAwsElasticacheParameterGroup(), "aws_elasticache_security_group": resourceAwsElasticacheSecurityGroup(), "aws_elasticache_subnet_group": resourceAwsElasticacheSubnetGroup(), + "aws_elasticsearch_domain": resourceAwsElasticSearchDomain(), "aws_elb": resourceAwsElb(), "aws_flow_log": resourceAwsFlowLog(), "aws_iam_access_key": resourceAwsIamAccessKey(), diff --git a/builtin/providers/aws/resource_aws_elasticsearch_domain.go b/builtin/providers/aws/resource_aws_elasticsearch_domain.go new file mode 100644 index 000000000..8f2d6c9c9 --- /dev/null +++ b/builtin/providers/aws/resource_aws_elasticsearch_domain.go @@ -0,0 +1,399 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsElasticSearchDomain() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsElasticSearchDomainCreate, + Read: resourceAwsElasticSearchDomainRead, + Update: resourceAwsElasticSearchDomainUpdate, + Delete: resourceAwsElasticSearchDomainDelete, + + Schema: 
map[string]*schema.Schema{ + "access_policies": &schema.Schema{ + Type: schema.TypeString, + StateFunc: normalizeJson, + Optional: true, + }, + "advanced_options": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + Computed: true, + }, + "domain_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(`^[0-9A-Za-z]+`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must start with a letter or number", k)) + } + if !regexp.MustCompile(`^[0-9A-Za-z][0-9a-z-]+$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q can only contain lowercase characters, numbers and hyphens", k)) + } + return + }, + }, + "arn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "domain_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "endpoint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "ebs_options": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ebs_enabled": &schema.Schema{ + Type: schema.TypeBool, + Required: true, + }, + "iops": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + "volume_size": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + "volume_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "cluster_config": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dedicated_master_count": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + "dedicated_master_enabled": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "dedicated_master_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "instance_count": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 1, + }, + "instance_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "m3.medium.elasticsearch", + }, + "zone_awareness_enabled": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + }, + "snapshot_options": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "automated_snapshot_start_hour": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + }, + }, + }, + }, + } +} + +func resourceAwsElasticSearchDomainCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).esconn + + input := elasticsearch.CreateElasticsearchDomainInput{ + DomainName: aws.String(d.Get("domain_name").(string)), + } + + if v, ok := d.GetOk("access_policies"); ok { + input.AccessPolicies = aws.String(v.(string)) + } + + if v, ok := d.GetOk("advanced_options"); ok { + input.AdvancedOptions = stringMapToPointers(v.(map[string]interface{})) + } + + if v, ok := d.GetOk("ebs_options"); ok { + options := v.([]interface{}) + + if len(options) > 1 { + return fmt.Errorf("Only a single ebs_options block is expected") + } else if len(options) == 1 { + if options[0] == nil { + return fmt.Errorf("At least one field is expected inside ebs_options") + } + + s := options[0].(map[string]interface{}) + input.EBSOptions = expandESEBSOptions(s) + } + } + + if v, ok := d.GetOk("cluster_config"); ok { + config := v.([]interface{}) + + if len(config) > 1 
{ + return fmt.Errorf("Only a single cluster_config block is expected") + } else if len(config) == 1 { + if config[0] == nil { + return fmt.Errorf("At least one field is expected inside cluster_config") + } + m := config[0].(map[string]interface{}) + input.ElasticsearchClusterConfig = expandESClusterConfig(m) + } + } + + if v, ok := d.GetOk("snapshot_options"); ok { + options := v.([]interface{}) + + if len(options) > 1 { + return fmt.Errorf("Only a single snapshot_options block is expected") + } else if len(options) == 1 { + if options[0] == nil { + return fmt.Errorf("At least one field is expected inside snapshot_options") + } + + o := options[0].(map[string]interface{}) + + snapshotOptions := elasticsearch.SnapshotOptions{ + AutomatedSnapshotStartHour: aws.Int64(int64(o["automated_snapshot_start_hour"].(int))), + } + + input.SnapshotOptions = &snapshotOptions + } + } + + log.Printf("[DEBUG] Creating ElasticSearch domain: %s", input) + out, err := conn.CreateElasticsearchDomain(&input) + if err != nil { + return err + } + + d.SetId(*out.DomainStatus.ARN) + + log.Printf("[DEBUG] Waiting for ElasticSearch domain %q to be created", d.Id()) + err = resource.Retry(15*time.Minute, func() error { + out, err := conn.DescribeElasticsearchDomain(&elasticsearch.DescribeElasticsearchDomainInput{ + DomainName: aws.String(d.Get("domain_name").(string)), + }) + if err != nil { + return resource.RetryError{Err: err} + } + + if !*out.DomainStatus.Processing && out.DomainStatus.Endpoint != nil { + return nil + } + + return fmt.Errorf("%q: Timeout while waiting for the domain to be created", d.Id()) + }) + if err != nil { + return err + } + + log.Printf("[DEBUG] ElasticSearch domain %q created", d.Id()) + + return resourceAwsElasticSearchDomainRead(d, meta) +} + +func resourceAwsElasticSearchDomainRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).esconn + + out, err := conn.DescribeElasticsearchDomain(&elasticsearch.DescribeElasticsearchDomainInput{ + DomainName: aws.String(d.Get("domain_name").(string)), + }) + if err != nil { + return err + } + + log.Printf("[DEBUG] Received ElasticSearch domain: %s", out) + + ds := out.DomainStatus + + d.Set("access_policies", *ds.AccessPolicies) + err = d.Set("advanced_options", pointersMapToStringList(ds.AdvancedOptions)) + if err != nil { + return err + } + d.Set("domain_id", *ds.DomainId) + d.Set("domain_name", *ds.DomainName) + if ds.Endpoint != nil { + d.Set("endpoint", *ds.Endpoint) + } + + err = d.Set("ebs_options", flattenESEBSOptions(ds.EBSOptions)) + if err != nil { + return err + } + err = d.Set("cluster_config", flattenESClusterConfig(ds.ElasticsearchClusterConfig)) + if err != nil { + return err + } + if ds.SnapshotOptions != nil { + d.Set("snapshot_options", map[string]interface{}{ + "automated_snapshot_start_hour": *ds.SnapshotOptions.AutomatedSnapshotStartHour, + }) + } + + d.Set("arn", *ds.ARN) + + return nil +} + +func resourceAwsElasticSearchDomainUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).esconn + + input := elasticsearch.UpdateElasticsearchDomainConfigInput{ + DomainName: aws.String(d.Get("domain_name").(string)), + } + + if d.HasChange("access_policies") { + input.AccessPolicies = aws.String(d.Get("access_policies").(string)) + } + + if d.HasChange("advanced_options") { + input.AdvancedOptions = stringMapToPointers(d.Get("advanced_options").(map[string]interface{})) + } + + if d.HasChange("ebs_options") { + options := d.Get("ebs_options").([]interface{}) + + if len(options) 
> 1 { + return fmt.Errorf("Only a single ebs_options block is expected") + } else if len(options) == 1 { + s := options[0].(map[string]interface{}) + input.EBSOptions = expandESEBSOptions(s) + } + } + + if d.HasChange("cluster_config") { + config := d.Get("cluster_config").([]interface{}) + + if len(config) > 1 { + return fmt.Errorf("Only a single cluster_config block is expected") + } else if len(config) == 1 { + m := config[0].(map[string]interface{}) + input.ElasticsearchClusterConfig = expandESClusterConfig(m) + } + } + + if d.HasChange("snapshot_options") { + options := d.Get("snapshot_options").([]interface{}) + + if len(options) > 1 { + return fmt.Errorf("Only a single snapshot_options block is expected") + } else if len(options) == 1 { + o := options[0].(map[string]interface{}) + + snapshotOptions := elasticsearch.SnapshotOptions{ + AutomatedSnapshotStartHour: aws.Int64(int64(o["automated_snapshot_start_hour"].(int))), + } + + input.SnapshotOptions = &snapshotOptions + } + } + + _, err := conn.UpdateElasticsearchDomainConfig(&input) + if err != nil { + return err + } + + err = resource.Retry(25*time.Minute, func() error { + out, err := conn.DescribeElasticsearchDomain(&elasticsearch.DescribeElasticsearchDomainInput{ + DomainName: aws.String(d.Get("domain_name").(string)), + }) + if err != nil { + return resource.RetryError{Err: err} + } + + if *out.DomainStatus.Processing == false { + return nil + } + + return fmt.Errorf("%q: Timeout while waiting for changes to be processed", d.Id()) + }) + if err != nil { + return err + } + + return resourceAwsElasticSearchDomainRead(d, meta) +} + +func resourceAwsElasticSearchDomainDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).esconn + + log.Printf("[DEBUG] Deleting ElasticSearch domain: %q", d.Get("domain_name").(string)) + _, err := conn.DeleteElasticsearchDomain(&elasticsearch.DeleteElasticsearchDomainInput{ + DomainName: aws.String(d.Get("domain_name").(string)), + }) + if err != nil { + return err + } + + log.Printf("[DEBUG] Waiting for ElasticSearch domain %q to be deleted", d.Get("domain_name").(string)) + err = resource.Retry(15*time.Minute, func() error { + out, err := conn.DescribeElasticsearchDomain(&elasticsearch.DescribeElasticsearchDomainInput{ + DomainName: aws.String(d.Get("domain_name").(string)), + }) + + if err != nil { + awsErr, ok := err.(awserr.Error) + if !ok { + return resource.RetryError{Err: err} + } + + if awsErr.Code() == "ResourceNotFoundException" { + return nil + } + + return resource.RetryError{Err: awsErr} + } + + if !*out.DomainStatus.Processing { + return nil + } + + return fmt.Errorf("%q: Timeout while waiting for the domain to be deleted", d.Id()) + }) + + d.SetId("") + + return err +} diff --git a/builtin/providers/aws/structure.go b/builtin/providers/aws/structure.go index dc7b6d89b..b738027f8 100644 --- a/builtin/providers/aws/structure.go +++ b/builtin/providers/aws/structure.go @@ -12,6 +12,7 @@ import ( "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ecs" "github.com/aws/aws-sdk-go/service/elasticache" + elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice" "github.com/aws/aws-sdk-go/service/elb" "github.com/aws/aws-sdk-go/service/rds" "github.com/aws/aws-sdk-go/service/route53" @@ -479,3 +480,113 @@ func validateRdsId(v interface{}, k string) (ws []string, errors []error) { } return } + +func expandESClusterConfig(m map[string]interface{}) *elasticsearch.ElasticsearchClusterConfig { + config := 
elasticsearch.ElasticsearchClusterConfig{} + + if v, ok := m["dedicated_master_enabled"]; ok { + isEnabled := v.(bool) + config.DedicatedMasterEnabled = aws.Bool(isEnabled) + + if isEnabled { + if v, ok := m["dedicated_master_count"]; ok && v.(int) > 0 { + config.DedicatedMasterCount = aws.Int64(int64(v.(int))) + } + if v, ok := m["dedicated_master_type"]; ok && v.(string) != "" { + config.DedicatedMasterType = aws.String(v.(string)) + } + } + } + + if v, ok := m["instance_count"]; ok { + config.InstanceCount = aws.Int64(int64(v.(int))) + } + if v, ok := m["instance_type"]; ok { + config.InstanceType = aws.String(v.(string)) + } + + if v, ok := m["zone_awareness_enabled"]; ok { + config.ZoneAwarenessEnabled = aws.Bool(v.(bool)) + } + + return &config +} + +func flattenESClusterConfig(c *elasticsearch.ElasticsearchClusterConfig) []map[string]interface{} { + m := map[string]interface{}{} + + if c.DedicatedMasterCount != nil { + m["dedicated_master_count"] = *c.DedicatedMasterCount + } + if c.DedicatedMasterEnabled != nil { + m["dedicated_master_enabled"] = *c.DedicatedMasterEnabled + } + if c.DedicatedMasterType != nil { + m["dedicated_master_type"] = *c.DedicatedMasterType + } + if c.InstanceCount != nil { + m["instance_count"] = *c.InstanceCount + } + if c.InstanceType != nil { + m["instance_type"] = *c.InstanceType + } + if c.ZoneAwarenessEnabled != nil { + m["zone_awareness_enabled"] = *c.ZoneAwarenessEnabled + } + + return []map[string]interface{}{m} +} + +func flattenESEBSOptions(o *elasticsearch.EBSOptions) []map[string]interface{} { + m := map[string]interface{}{} + + if o.EBSEnabled != nil { + m["ebs_enabled"] = *o.EBSEnabled + } + if o.Iops != nil { + m["iops"] = *o.Iops + } + if o.VolumeSize != nil { + m["volume_size"] = *o.VolumeSize + } + if o.VolumeType != nil { + m["volume_type"] = *o.VolumeType + } + + return []map[string]interface{}{m} +} + +func expandESEBSOptions(m map[string]interface{}) *elasticsearch.EBSOptions { + options := elasticsearch.EBSOptions{} + + if v, ok := m["ebs_enabled"]; ok { + options.EBSEnabled = aws.Bool(v.(bool)) + } + if v, ok := m["iops"]; ok && v.(int) > 0 { + options.Iops = aws.Int64(int64(v.(int))) + } + if v, ok := m["volume_size"]; ok && v.(int) > 0 { + options.VolumeSize = aws.Int64(int64(v.(int))) + } + if v, ok := m["volume_type"]; ok && v.(string) != "" { + options.VolumeType = aws.String(v.(string)) + } + + return &options +} + +func pointersMapToStringList(pointers map[string]*string) map[string]interface{} { + list := make(map[string]interface{}, len(pointers)) + for i, v := range pointers { + list[i] = *v + } + return list +} + +func stringMapToPointers(m map[string]interface{}) map[string]*string { + list := make(map[string]*string, len(m)) + for i, v := range m { + list[i] = aws.String(v.(string)) + } + return list +} From c221da9aeb8d541e941a68fdc0400b8f88ecefaf Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Thu, 1 Oct 2015 15:13:12 -0700 Subject: [PATCH 110/220] provider/aws: Add acceptance test for aws_elasticsearch_domain --- .../resource_aws_elasticsearch_domain_test.go | 122 ++++++++++++++++++ 1 file changed, 122 insertions(+) create mode 100644 builtin/providers/aws/resource_aws_elasticsearch_domain_test.go diff --git a/builtin/providers/aws/resource_aws_elasticsearch_domain_test.go b/builtin/providers/aws/resource_aws_elasticsearch_domain_test.go new file mode 100644 index 000000000..dee675d0d --- /dev/null +++ b/builtin/providers/aws/resource_aws_elasticsearch_domain_test.go @@ -0,0 +1,122 @@ +package aws + +import ( + 
"fmt" + "testing" + + "github.com/aws/aws-sdk-go/aws" + elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSElasticSearchDomain_basic(t *testing.T) { + var domain elasticsearch.ElasticsearchDomainStatus + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckESDomainDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccESDomainConfig_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckESDomainExists("aws_elasticsearch_domain.example", &domain), + ), + }, + }, + }) +} + +func TestAccAWSElasticSearchDomain_complex(t *testing.T) { + var domain elasticsearch.ElasticsearchDomainStatus + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckESDomainDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccESDomainConfig_complex, + Check: resource.ComposeTestCheckFunc( + testAccCheckESDomainExists("aws_elasticsearch_domain.example", &domain), + ), + }, + }, + }) +} + +func testAccCheckESDomainExists(n string, domain *elasticsearch.ElasticsearchDomainStatus) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ES Domain ID is set") + } + + conn := testAccProvider.Meta().(*AWSClient).esconn + opts := &elasticsearch.DescribeElasticsearchDomainInput{ + DomainName: aws.String(rs.Primary.Attributes["domain_name"]), + } + + resp, err := conn.DescribeElasticsearchDomain(opts) + if err != nil { + return fmt.Errorf("Error describing domain: %s", err.Error()) + } + + *domain = *resp.DomainStatus + + return nil + } +} + +func testAccCheckESDomainDestroy(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_elasticsearch_domain" { + continue + } + + conn := testAccProvider.Meta().(*AWSClient).esconn + opts := &elasticsearch.DescribeElasticsearchDomainInput{ + DomainName: aws.String(rs.Primary.Attributes["domain_name"]), + } + + _, err := conn.DescribeElasticsearchDomain(opts) + if err != nil { + return fmt.Errorf("Error describing ES domains: %q", err.Error()) + } + } + return nil +} + +const testAccESDomainConfig_basic = ` +resource "aws_elasticsearch_domain" "example" { + domain_name = "tf-test-1" +} +` + +const testAccESDomainConfig_complex = ` +resource "aws_elasticsearch_domain" "example" { + domain_name = "tf-test-2" + + advanced_options { + "indices.fielddata.cache.size" = 80 + } + + ebs_options { + ebs_enabled = false + } + + cluster_config { + instance_count = 2 + zone_awareness_enabled = true + } + + snapshot_options { + automated_snapshot_start_hour = 23 + } +} +` From e65ef8f13ff2289de85ee34b1def591f416817ef Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Thu, 1 Oct 2015 15:13:32 -0700 Subject: [PATCH 111/220] provider/aws: Add docs for aws_elasticsearch_domain --- .../aws/r/elasticsearch_domain.html.markdown | 83 +++++++++++++++++++ website/source/layouts/aws.erb | 12 +++ 2 files changed, 95 insertions(+) create mode 100644 website/source/docs/providers/aws/r/elasticsearch_domain.html.markdown diff --git a/website/source/docs/providers/aws/r/elasticsearch_domain.html.markdown 
b/website/source/docs/providers/aws/r/elasticsearch_domain.html.markdown new file mode 100644 index 000000000..9dbacffcd --- /dev/null +++ b/website/source/docs/providers/aws/r/elasticsearch_domain.html.markdown @@ -0,0 +1,83 @@ +--- +layout: "aws" +page_title: "AWS: aws_elasticsearch_domain" +sidebar_current: "docs-aws-elasticsearch-domain" +description: |- + Provides an ElasticSearch Domain. +--- + +# aws\_elasticsearch\_domain + + +## Example Usage + +``` +resource "aws_elasticsearch_domain" "es" { + domain_name = "tf-test" + advanced_options { + "rest.action.multi.allow_explicit_index" = true + } + + access_policies = < + > + ElasticSearch Resources + + + + > IAM Resources From 9f106bc98e9dc985d2f1ed5824925ded8ef918c3 Mon Sep 17 00:00:00 2001 From: Clint Date: Mon, 12 Oct 2015 14:24:14 -0500 Subject: [PATCH 182/220] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d04f464e2..9160afcc8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,7 @@ IMPROVEMENTS: * provider/aws: Support IAM role names (previously just ARNs) in `aws_ecs_service.iam_role` [GH-3061] * provider/aws: Add update method to RDS Subnet groups, can modify subnets without recreating [GH-3053] * provider/aws: Paginate notifications returned for ASG Notifications [GH-3043] + * provider/aws: Adds additional S3 Bucket Object inputs [GH-3265] * provider/aws: add `ses_smtp_password` to `aws_iam_access_key` [GH-3165] * provider/aws: read `iam_instance_profile` for `aws_instance` and save to state [GH-3167] * provider/aws: allow `instance` to be computed in `aws_eip` [GH-3036] From ed25948651799ff7a03575faa2a9d3f5b016900b Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Mon, 12 Oct 2015 14:24:57 -0500 Subject: [PATCH 183/220] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9160afcc8..4ca088c72 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ FEATURES: * **New resource: `aws_elasticsearch_domain`** [GH-3443] * **New resource: `aws_directory_service_directory`** [GH-3228] * **New resource: `aws_autoscaling_lifecycle_hook`** [GH-3351] + * **New resource: `aws_placement_group`** [GH-3457] IMPROVEMENTS: From 810d0882792cedbb5ce3898b6d93673e5c461265 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Mon, 12 Oct 2015 15:50:04 -0500 Subject: [PATCH 184/220] Fix whitespace formatting with go fmt --- builtin/providers/aws/resource_aws_eip.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/builtin/providers/aws/resource_aws_eip.go b/builtin/providers/aws/resource_aws_eip.go index bf7a9e3c5..4b369ee60 100644 --- a/builtin/providers/aws/resource_aws_eip.go +++ b/builtin/providers/aws/resource_aws_eip.go @@ -30,13 +30,13 @@ func resourceAwsEip() *schema.Resource { "instance": &schema.Schema{ Type: schema.TypeString, Optional: true, - Computed: true, + Computed: true, }, "network_interface": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, + Type: schema.TypeString, + Optional: true, + Computed: true, }, "allocation_id": &schema.Schema{ From d3c5c0d85f72025536152921f80d72f63ba3580b Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Tue, 18 Aug 2015 09:56:54 -0500 Subject: [PATCH 185/220] provider/aws: Update Security Group Rules to Version 2 --- .../aws/resource_aws_security_group_rule.go | 83 +++-- ...esource_aws_security_group_rule_migrate.go | 8 +- .../resource_aws_security_group_rule_test.go | 284 ++++++++++++++++-- 3 files 
changed, 332 insertions(+), 43 deletions(-) diff --git a/builtin/providers/aws/resource_aws_security_group_rule.go b/builtin/providers/aws/resource_aws_security_group_rule.go index 97b6d4025..bd40c284f 100644 --- a/builtin/providers/aws/resource_aws_security_group_rule.go +++ b/builtin/providers/aws/resource_aws_security_group_rule.go @@ -20,7 +20,7 @@ func resourceAwsSecurityGroupRule() *schema.Resource { Read: resourceAwsSecurityGroupRuleRead, Delete: resourceAwsSecurityGroupRuleDelete, - SchemaVersion: 1, + SchemaVersion: 2, MigrateState: resourceAwsSecurityGroupRuleMigrateState, Schema: map[string]*schema.Schema{ @@ -67,14 +67,15 @@ func resourceAwsSecurityGroupRule() *schema.Resource { Optional: true, ForceNew: true, Computed: true, - ConflictsWith: []string{"cidr_blocks"}, + ConflictsWith: []string{"cidr_blocks", "self"}, }, "self": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + ConflictsWith: []string{"cidr_blocks"}, }, }, } @@ -142,7 +143,7 @@ information and instructions for recovery. Error message: %s`, awsErr.Message()) ruleType, autherr) } - d.SetId(ipPermissionIDHash(ruleType, perm)) + d.SetId(ipPermissionIDHash(sg_id, ruleType, perm)) return resourceAwsSecurityGroupRuleRead(d, meta) } @@ -158,24 +159,67 @@ func resourceAwsSecurityGroupRuleRead(d *schema.ResourceData, meta interface{}) } var rule *ec2.IpPermission + var rules []*ec2.IpPermission ruleType := d.Get("type").(string) - var rl []*ec2.IpPermission switch ruleType { case "ingress": - rl = sg.IpPermissions + rules = sg.IpPermissions default: - rl = sg.IpPermissionsEgress + rules = sg.IpPermissionsEgress } - for _, r := range rl { - if d.Id() == ipPermissionIDHash(ruleType, r) { - rule = r + p := expandIPPerm(d, sg) + + if len(rules) == 0 { + return fmt.Errorf("No IPPerms") + } + + for _, r := range rules { + if r.ToPort != nil && *p.ToPort != *r.ToPort { + continue + } + + if r.FromPort != nil && *p.FromPort != *r.FromPort { + continue + } + + if r.IpProtocol != nil && *p.IpProtocol != *r.IpProtocol { + continue + } + + remaining := len(p.IpRanges) + for _, ip := range p.IpRanges { + for _, rip := range r.IpRanges { + if *ip.CidrIp == *rip.CidrIp { + remaining-- + } + } + } + + if remaining > 0 { + continue } + + remaining = len(p.UserIdGroupPairs) + for _, ip := range p.UserIdGroupPairs { + for _, rip := range r.UserIdGroupPairs { + if *ip.GroupId == *rip.GroupId { + remaining-- + } + } + } + + if remaining > 0 { + continue + } + + log.Printf("[DEBUG] Found rule for Security Group Rule (%s): %s", d.Id(), r) + rule = r } if rule == nil { - log.Printf("[DEBUG] Unable to find matching %s Security Group Rule for Group %s", - ruleType, sg_id) + log.Printf("[DEBUG] Unable to find matching %s Security Group Rule (%s) for Group %s", + ruleType, d.Id(), sg_id) d.SetId("") return nil } @@ -186,14 +230,14 @@ func resourceAwsSecurityGroupRuleRead(d *schema.ResourceData, meta interface{}) d.Set("type", ruleType) var cb []string - for _, c := range rule.IpRanges { + for _, c := range p.IpRanges { cb = append(cb, *c.CidrIp) } d.Set("cidr_blocks", cb) - if len(rule.UserIdGroupPairs) > 0 { - s := rule.UserIdGroupPairs[0] + if len(p.UserIdGroupPairs) > 0 { + s := p.UserIdGroupPairs[0] d.Set("source_security_group_id", *s.GroupId) } @@ -285,8 +329,9 @@ func (b ByGroupPair) Less(i, j int) bool { panic("mismatched security group rules, may be a terraform bug") } -func ipPermissionIDHash(ruleType string, ip 
*ec2.IpPermission) string { +func ipPermissionIDHash(sg_id, ruleType string, ip *ec2.IpPermission) string { var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("%s-", sg_id)) if ip.FromPort != nil && *ip.FromPort > 0 { buf.WriteString(fmt.Sprintf("%d-", *ip.FromPort)) } diff --git a/builtin/providers/aws/resource_aws_security_group_rule_migrate.go b/builtin/providers/aws/resource_aws_security_group_rule_migrate.go index 98ecced70..3dd6f5f72 100644 --- a/builtin/providers/aws/resource_aws_security_group_rule_migrate.go +++ b/builtin/providers/aws/resource_aws_security_group_rule_migrate.go @@ -17,6 +17,12 @@ func resourceAwsSecurityGroupRuleMigrateState( case 0: log.Println("[INFO] Found AWS Security Group State v0; migrating to v1") return migrateSGRuleStateV0toV1(is) + case 1: + log.Println("[INFO] Found AWS Security Group State v1; migrating to v2") + // migrating to version 2 of the schema is the same as 0->1, since the + // method signature has changed now and will use the security group id in + // the hash + return migrateSGRuleStateV0toV1(is) default: return is, fmt.Errorf("Unexpected schema version: %d", v) } @@ -37,7 +43,7 @@ func migrateSGRuleStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceS } log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - newID := ipPermissionIDHash(is.Attributes["type"], perm) + newID := ipPermissionIDHash(is.Attributes["security_group_id"], is.Attributes["type"], perm) is.Attributes["id"] = newID is.ID = newID log.Printf("[DEBUG] Attributes after migration: %#v, new id: %s", is.Attributes, newID) diff --git a/builtin/providers/aws/resource_aws_security_group_rule_test.go b/builtin/providers/aws/resource_aws_security_group_rule_test.go index c160703f3..a00385ba7 100644 --- a/builtin/providers/aws/resource_aws_security_group_rule_test.go +++ b/builtin/providers/aws/resource_aws_security_group_rule_test.go @@ -2,7 +2,7 @@ package aws import ( "fmt" - "reflect" + "log" "testing" "github.com/aws/aws-sdk-go/aws" @@ -90,15 +90,15 @@ func TestIpPermissionIDHash(t *testing.T) { Type string Output string }{ - {simple, "ingress", "sg-82613597"}, - {egress, "egress", "sg-363054720"}, - {egress_all, "egress", "sg-2766285362"}, - {vpc_security_group_source, "egress", "sg-2661404947"}, - {security_group_source, "egress", "sg-1841245863"}, + {simple, "ingress", "sg-3403497314"}, + {egress, "egress", "sg-1173186295"}, + {egress_all, "egress", "sg-766323498"}, + {vpc_security_group_source, "egress", "sg-351225364"}, + {security_group_source, "egress", "sg-2198807188"}, } for _, tc := range cases { - actual := ipPermissionIDHash(tc.Type, tc.Input) + actual := ipPermissionIDHash("sg-12345", tc.Type, tc.Input) if actual != tc.Output { t.Errorf("input: %s - %s\noutput: %s", tc.Type, tc.Input, actual) } @@ -132,7 +132,7 @@ func TestAccAWSSecurityGroupRule_Ingress_VPC(t *testing.T) { Config: testAccAWSSecurityGroupRuleIngressConfig, Check: resource.ComposeTestCheckFunc( testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), - testAccCheckAWSSecurityGroupRuleAttributes(&group, "ingress"), + testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.ingress_1", &group, nil, "ingress"), resource.TestCheckResourceAttr( "aws_security_group_rule.ingress_1", "from_port", "80"), testRuleCount, @@ -169,7 +169,7 @@ func TestAccAWSSecurityGroupRule_Ingress_Classic(t *testing.T) { Config: testAccAWSSecurityGroupRuleIngressClassicConfig, Check: resource.ComposeTestCheckFunc( 
testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), - testAccCheckAWSSecurityGroupRuleAttributes(&group, "ingress"), + testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.ingress_1", &group, nil, "ingress"), resource.TestCheckResourceAttr( "aws_security_group_rule.ingress_1", "from_port", "80"), testRuleCount, @@ -231,7 +231,7 @@ func TestAccAWSSecurityGroupRule_Egress(t *testing.T) { Config: testAccAWSSecurityGroupRuleEgressConfig, Check: resource.ComposeTestCheckFunc( testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), - testAccCheckAWSSecurityGroupRuleAttributes(&group, "egress"), + testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.egress_1", &group, nil, "egress"), ), }, }, @@ -256,6 +256,92 @@ func TestAccAWSSecurityGroupRule_SelfReference(t *testing.T) { }) } +// testing partial match implementation +func TestAccAWSSecurityGroupRule_PartialMatching_Basic(t *testing.T) { + var group ec2.SecurityGroup + + p := ec2.IpPermission{ + FromPort: aws.Int64(80), + ToPort: aws.Int64(80), + IpProtocol: aws.String("tcp"), + IpRanges: []*ec2.IpRange{ + &ec2.IpRange{CidrIp: aws.String("10.0.2.0/24")}, + &ec2.IpRange{CidrIp: aws.String("10.0.3.0/24")}, + &ec2.IpRange{CidrIp: aws.String("10.0.4.0/24")}, + }, + } + + o := ec2.IpPermission{ + FromPort: aws.Int64(80), + ToPort: aws.Int64(80), + IpProtocol: aws.String("tcp"), + IpRanges: []*ec2.IpRange{ + &ec2.IpRange{CidrIp: aws.String("10.0.5.0/24")}, + }, + } + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSecurityGroupRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSSecurityGroupRulePartialMatching, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), + testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.ingress", &group, &p, "ingress"), + testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.other", &group, &o, "ingress"), + testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.nat_ingress", &group, &o, "ingress"), + ), + }, + }, + }) +} + +func TestAccAWSSecurityGroupRule_PartialMatching_Source(t *testing.T) { + var group ec2.SecurityGroup + var nat ec2.SecurityGroup + var p ec2.IpPermission + + // This function creates the expected IPPermission with the group id from an + // external security group, needed because Security Group IDs are generated on + // AWS side and can't be known ahead of time. 
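+	// setupSG runs as an ordinary resource.TestCheckFunc, after the apply
+	// step has populated `nat`, so the expected ec2.IpPermission can only be
+	// assembled at check time; building it when the test is defined would
+	// capture a nil GroupId.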
+ setupSG := func(*terraform.State) error { + if nat.GroupId == nil { + return fmt.Errorf("Error: nat group has nil GroupID") + } + + p = ec2.IpPermission{ + FromPort: aws.Int64(80), + ToPort: aws.Int64(80), + IpProtocol: aws.String("tcp"), + UserIdGroupPairs: []*ec2.UserIdGroupPair{ + &ec2.UserIdGroupPair{GroupId: nat.GroupId}, + }, + } + + return nil + } + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSecurityGroupRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSSecurityGroupRulePartialMatching_Source, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), + testAccCheckAWSSecurityGroupRuleExists("aws_security_group.nat", &nat), + setupSG, + testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.source_ingress", &group, &p, "ingress"), + ), + }, + }, + }) + +} + func testAccCheckAWSSecurityGroupRuleDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).ec2conn @@ -319,14 +405,27 @@ func testAccCheckAWSSecurityGroupRuleExists(n string, group *ec2.SecurityGroup) } } -func testAccCheckAWSSecurityGroupRuleAttributes(group *ec2.SecurityGroup, ruleType string) resource.TestCheckFunc { +func testAccCheckAWSSecurityGroupRuleAttributes(n string, group *ec2.SecurityGroup, p *ec2.IpPermission, ruleType string) resource.TestCheckFunc { return func(s *terraform.State) error { - p := &ec2.IpPermission{ - FromPort: aws.Int64(80), - ToPort: aws.Int64(8000), - IpProtocol: aws.String("tcp"), - IpRanges: []*ec2.IpRange{&ec2.IpRange{CidrIp: aws.String("10.0.0.0/8")}}, + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Security Group Rule Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No Security Group Rule is set") } + + if p == nil { + p = &ec2.IpPermission{ + FromPort: aws.Int64(80), + ToPort: aws.Int64(8000), + IpProtocol: aws.String("tcp"), + IpRanges: []*ec2.IpRange{&ec2.IpRange{CidrIp: aws.String("10.0.0.0/8")}}, + } + } + + var matchingRule *ec2.IpPermission var rules []*ec2.IpPermission if ruleType == "ingress" { rules = group.IpPermissions @@ -338,15 +437,53 @@ func testAccCheckAWSSecurityGroupRuleAttributes(group *ec2.SecurityGroup, ruleTy return fmt.Errorf("No IPPerms") } - // Compare our ingress - if !reflect.DeepEqual(rules[0], p) { - return fmt.Errorf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - rules[0], - p) + for _, r := range rules { + if r.ToPort != nil && *p.ToPort != *r.ToPort { + continue + } + + if r.FromPort != nil && *p.FromPort != *r.FromPort { + continue + } + + if r.IpProtocol != nil && *p.IpProtocol != *r.IpProtocol { + continue + } + + remaining := len(p.IpRanges) + for _, ip := range p.IpRanges { + for _, rip := range r.IpRanges { + if *ip.CidrIp == *rip.CidrIp { + remaining-- + } + } + } + + if remaining > 0 { + continue + } + + remaining = len(p.UserIdGroupPairs) + for _, ip := range p.UserIdGroupPairs { + for _, rip := range r.UserIdGroupPairs { + if *ip.GroupId == *rip.GroupId { + remaining-- + } + } + } + + if remaining > 0 { + continue + } + matchingRule = r } - return nil + if matchingRule != nil { + log.Printf("[DEBUG] Matching rule found : %s", matchingRule) + return nil + } + + return fmt.Errorf("Error here\n\tlooking for %s, wasn't found in %s", p, rules) } } @@ -480,3 +617,104 @@ resource "aws_security_group_rule" "self" { security_group_id = "${aws_security_group.web.id}" } ` + +const 
testAccAWSSecurityGroupRulePartialMatching = `
+resource "aws_vpc" "default" {
+  cidr_block = "10.0.0.0/16"
+  tags {
+    Name = "tf-sg-rule-bug"
+  }
+}
+
+resource "aws_security_group" "web" {
+  name = "tf-other"
+  vpc_id = "${aws_vpc.default.id}"
+  tags {
+    Name = "tf-other-sg"
+  }
+}
+
+resource "aws_security_group" "nat" {
+  name = "tf-nat"
+  vpc_id = "${aws_vpc.default.id}"
+  tags {
+    Name = "tf-nat-sg"
+  }
+}
+
+resource "aws_security_group_rule" "ingress" {
+  type = "ingress"
+  from_port = 80
+  to_port = 80
+  protocol = "tcp"
+  cidr_blocks = ["10.0.2.0/24", "10.0.3.0/24", "10.0.4.0/24"]
+
+  security_group_id = "${aws_security_group.web.id}"
+}
+
+resource "aws_security_group_rule" "other" {
+  type = "ingress"
+  from_port = 80
+  to_port = 80
+  protocol = "tcp"
+  cidr_blocks = ["10.0.5.0/24"]
+
+  security_group_id = "${aws_security_group.web.id}"
+}
+
+// same as above, but different group, to guard against bad hashing
+resource "aws_security_group_rule" "nat_ingress" {
+  type = "ingress"
+  from_port = 80
+  to_port = 80
+  protocol = "tcp"
+  cidr_blocks = ["10.0.2.0/24", "10.0.3.0/24", "10.0.4.0/24"]
+
+  security_group_id = "${aws_security_group.nat.id}"
+}
+`
+
+const testAccAWSSecurityGroupRulePartialMatching_Source = `
+resource "aws_vpc" "default" {
+  cidr_block = "10.0.0.0/16"
+  tags {
+    Name = "tf-sg-rule-bug"
+  }
+}
+
+resource "aws_security_group" "web" {
+  name = "tf-other"
+  vpc_id = "${aws_vpc.default.id}"
+  tags {
+    Name = "tf-other-sg"
+  }
+}
+
+resource "aws_security_group" "nat" {
+  name = "tf-nat"
+  vpc_id = "${aws_vpc.default.id}"
+  tags {
+    Name = "tf-nat-sg"
+  }
+}
+
+resource "aws_security_group_rule" "source_ingress" {
+  type = "ingress"
+  from_port = 80
+  to_port = 80
+  protocol = "tcp"
+
+  source_security_group_id = "${aws_security_group.nat.id}"
+  security_group_id = "${aws_security_group.web.id}"
+}
+
+resource "aws_security_group_rule" "other_ingress" {
+  type = "ingress"
+  from_port = 80
+  to_port = 80
+  protocol = "tcp"
+  cidr_blocks = ["10.0.2.0/24", "10.0.3.0/24", "10.0.4.0/24"]
+
+  security_group_id = "${aws_security_group.web.id}"
+}
+`

From e0bb04b82287565f33baa1dc4cdef8953000616e Mon Sep 17 00:00:00 2001
From: Clint Shryock
Date: Tue, 18 Aug 2015 10:17:15 -0500
Subject: [PATCH 186/220] update expected hash for migration test

---
 .../aws/resource_aws_security_group_rule_migrate_test.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/builtin/providers/aws/resource_aws_security_group_rule_migrate_test.go b/builtin/providers/aws/resource_aws_security_group_rule_migrate_test.go
index 664f05039..f9352fa27 100644
--- a/builtin/providers/aws/resource_aws_security_group_rule_migrate_test.go
+++ b/builtin/providers/aws/resource_aws_security_group_rule_migrate_test.go
@@ -27,7 +27,7 @@ func TestAWSSecurityGroupRuleMigrateState(t *testing.T) {
 			"from_port": "0",
 			"source_security_group_id": "sg-11877275",
 		},
-		Expected: "sg-3766347571",
+		Expected: "sg-2889201120",
 	},
 	"v0_2": {
 		StateVersion: 0,
@@ -44,7 +44,7 @@ func TestAWSSecurityGroupRuleMigrateState(t *testing.T) {
 			"cidr_blocks.2": "172.16.3.0/24",
 			"cidr_blocks.3": "172.16.4.0/24",
 			"cidr_blocks.#": "4"},
-		Expected: "sg-4100229787",
+		Expected: "sg-1826358977",
 	},
 }

From 03aac9f42b7c1159abe681951acb5a3ac1aea34b Mon Sep 17 00:00:00 2001
From: Clint Shryock
Date: Fri, 21 Aug 2015 09:58:56 -0500
Subject: [PATCH 187/220] Expand on an error case with more descriptive error

---
 builtin/providers/aws/resource_aws_eip.go | 22 +++++++++----------
 .../aws/resource_aws_security_group_rule.go | 8 ++++---
.../resource_aws_security_group_rule_test.go | 2 +- 3 files changed, 17 insertions(+), 15 deletions(-) diff --git a/builtin/providers/aws/resource_aws_eip.go b/builtin/providers/aws/resource_aws_eip.go index 4b369ee60..0a7801bee 100644 --- a/builtin/providers/aws/resource_aws_eip.go +++ b/builtin/providers/aws/resource_aws_eip.go @@ -27,19 +27,19 @@ func resourceAwsEip() *schema.Resource { ForceNew: true, }, - "instance": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, + "instance": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, - "network_interface": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, + "network_interface": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, - "allocation_id": &schema.Schema{ + "allocation_id": &schema.Schema{ Type: schema.TypeString, Computed: true, }, diff --git a/builtin/providers/aws/resource_aws_security_group_rule.go b/builtin/providers/aws/resource_aws_security_group_rule.go index bd40c284f..a1f078a82 100644 --- a/builtin/providers/aws/resource_aws_security_group_rule.go +++ b/builtin/providers/aws/resource_aws_security_group_rule.go @@ -171,7 +171,9 @@ func resourceAwsSecurityGroupRuleRead(d *schema.ResourceData, meta interface{}) p := expandIPPerm(d, sg) if len(rules) == 0 { - return fmt.Errorf("No IPPerms") + return fmt.Errorf( + "[WARN] No %s rules were found for Security Group (%s) looking for Security Group Rule (%s)", + ruleType, *sg.GroupName, d.Id()) } for _, r := range rules { @@ -198,7 +200,7 @@ func resourceAwsSecurityGroupRuleRead(d *schema.ResourceData, meta interface{}) if remaining > 0 { continue - } + } remaining = len(p.UserIdGroupPairs) for _, ip := range p.UserIdGroupPairs { @@ -211,7 +213,7 @@ func resourceAwsSecurityGroupRuleRead(d *schema.ResourceData, meta interface{}) if remaining > 0 { continue - } + } log.Printf("[DEBUG] Found rule for Security Group Rule (%s): %s", d.Id(), r) rule = r diff --git a/builtin/providers/aws/resource_aws_security_group_rule_test.go b/builtin/providers/aws/resource_aws_security_group_rule_test.go index a00385ba7..29e831446 100644 --- a/builtin/providers/aws/resource_aws_security_group_rule_test.go +++ b/builtin/providers/aws/resource_aws_security_group_rule_test.go @@ -257,7 +257,7 @@ func TestAccAWSSecurityGroupRule_SelfReference(t *testing.T) { } // testing partial match implementation -func TestAccAWSSecurityGroupRule_PartialMatching_Basic(t *testing.T) { +func TestAccAWSSecurityGroupRule_PartialMatching_basic(t *testing.T) { var group ec2.SecurityGroup p := ec2.IpPermission{ From 9f3a17e9b4074b420431a8e345b377f543a654d2 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Mon, 12 Oct 2015 15:19:42 -0500 Subject: [PATCH 188/220] update sg rule ids --- builtin/providers/aws/resource_aws_eip.go | 22 +- .../aws/resource_aws_security_group_rule.go | 126 ++++---- ...esource_aws_security_group_rule_migrate.go | 14 +- ...ce_aws_security_group_rule_migrate_test.go | 4 +- .../resource_aws_security_group_rule_test.go | 270 +++++++++--------- 5 files changed, 218 insertions(+), 218 deletions(-) diff --git a/builtin/providers/aws/resource_aws_eip.go b/builtin/providers/aws/resource_aws_eip.go index 0a7801bee..4b369ee60 100644 --- a/builtin/providers/aws/resource_aws_eip.go +++ b/builtin/providers/aws/resource_aws_eip.go @@ -27,19 +27,19 @@ func resourceAwsEip() *schema.Resource { ForceNew: true, }, - "instance": &schema.Schema{ - Type: schema.TypeString, - Optional: 
true, - Computed: true, - }, + "instance": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, - "network_interface": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, + "network_interface": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, - "allocation_id": &schema.Schema{ + "allocation_id": &schema.Schema{ Type: schema.TypeString, Computed: true, }, diff --git a/builtin/providers/aws/resource_aws_security_group_rule.go b/builtin/providers/aws/resource_aws_security_group_rule.go index a1f078a82..55499cfd5 100644 --- a/builtin/providers/aws/resource_aws_security_group_rule.go +++ b/builtin/providers/aws/resource_aws_security_group_rule.go @@ -20,7 +20,7 @@ func resourceAwsSecurityGroupRule() *schema.Resource { Read: resourceAwsSecurityGroupRuleRead, Delete: resourceAwsSecurityGroupRuleDelete, - SchemaVersion: 2, + SchemaVersion: 2, MigrateState: resourceAwsSecurityGroupRuleMigrateState, Schema: map[string]*schema.Schema{ @@ -67,15 +67,15 @@ func resourceAwsSecurityGroupRule() *schema.Resource { Optional: true, ForceNew: true, Computed: true, - ConflictsWith: []string{"cidr_blocks", "self"}, + ConflictsWith: []string{"cidr_blocks", "self"}, }, "self": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, - ConflictsWith: []string{"cidr_blocks"}, + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + ConflictsWith: []string{"cidr_blocks"}, }, }, } @@ -143,7 +143,7 @@ information and instructions for recovery. Error message: %s`, awsErr.Message()) ruleType, autherr) } - d.SetId(ipPermissionIDHash(sg_id, ruleType, perm)) + d.SetId(ipPermissionIDHash(sg_id, ruleType, perm)) return resourceAwsSecurityGroupRuleRead(d, meta) } @@ -159,69 +159,69 @@ func resourceAwsSecurityGroupRuleRead(d *schema.ResourceData, meta interface{}) } var rule *ec2.IpPermission - var rules []*ec2.IpPermission + var rules []*ec2.IpPermission ruleType := d.Get("type").(string) switch ruleType { case "ingress": - rules = sg.IpPermissions + rules = sg.IpPermissions default: - rules = sg.IpPermissionsEgress + rules = sg.IpPermissionsEgress } - p := expandIPPerm(d, sg) + p := expandIPPerm(d, sg) - if len(rules) == 0 { - return fmt.Errorf( - "[WARN] No %s rules were found for Security Group (%s) looking for Security Group Rule (%s)", - ruleType, *sg.GroupName, d.Id()) - } + if len(rules) == 0 { + return fmt.Errorf( + "[WARN] No %s rules were found for Security Group (%s) looking for Security Group Rule (%s)", + ruleType, *sg.GroupName, d.Id()) + } - for _, r := range rules { - if r.ToPort != nil && *p.ToPort != *r.ToPort { - continue - } - - if r.FromPort != nil && *p.FromPort != *r.FromPort { - continue - } - - if r.IpProtocol != nil && *p.IpProtocol != *r.IpProtocol { - continue - } - - remaining := len(p.IpRanges) - for _, ip := range p.IpRanges { - for _, rip := range r.IpRanges { - if *ip.CidrIp == *rip.CidrIp { - remaining-- - } - } - } - - if remaining > 0 { - continue - } - - remaining = len(p.UserIdGroupPairs) - for _, ip := range p.UserIdGroupPairs { - for _, rip := range r.UserIdGroupPairs { - if *ip.GroupId == *rip.GroupId { - remaining-- - } - } - } - - if remaining > 0 { - continue + for _, r := range rules { + if r.ToPort != nil && *p.ToPort != *r.ToPort { + continue } - log.Printf("[DEBUG] Found rule for Security Group Rule (%s): %s", d.Id(), r) - rule = r + if r.FromPort != nil && *p.FromPort != *r.FromPort { + continue + } + + if r.IpProtocol 
!= nil && *p.IpProtocol != *r.IpProtocol { + continue + } + + remaining := len(p.IpRanges) + for _, ip := range p.IpRanges { + for _, rip := range r.IpRanges { + if *ip.CidrIp == *rip.CidrIp { + remaining-- + } + } + } + + if remaining > 0 { + continue + } + + remaining = len(p.UserIdGroupPairs) + for _, ip := range p.UserIdGroupPairs { + for _, rip := range r.UserIdGroupPairs { + if *ip.GroupId == *rip.GroupId { + remaining-- + } + } + } + + if remaining > 0 { + continue + } + + log.Printf("[DEBUG] Found rule for Security Group Rule (%s): %s", d.Id(), r) + rule = r } if rule == nil { - log.Printf("[DEBUG] Unable to find matching %s Security Group Rule (%s) for Group %s", - ruleType, d.Id(), sg_id) + log.Printf("[DEBUG] Unable to find matching %s Security Group Rule (%s) for Group %s", + ruleType, d.Id(), sg_id) d.SetId("") return nil } @@ -232,14 +232,14 @@ func resourceAwsSecurityGroupRuleRead(d *schema.ResourceData, meta interface{}) d.Set("type", ruleType) var cb []string - for _, c := range p.IpRanges { + for _, c := range p.IpRanges { cb = append(cb, *c.CidrIp) } d.Set("cidr_blocks", cb) - if len(p.UserIdGroupPairs) > 0 { - s := p.UserIdGroupPairs[0] + if len(p.UserIdGroupPairs) > 0 { + s := p.UserIdGroupPairs[0] d.Set("source_security_group_id", *s.GroupId) } @@ -333,7 +333,7 @@ func (b ByGroupPair) Less(i, j int) bool { func ipPermissionIDHash(sg_id, ruleType string, ip *ec2.IpPermission) string { var buf bytes.Buffer - buf.WriteString(fmt.Sprintf("%s-", sg_id)) + buf.WriteString(fmt.Sprintf("%s-", sg_id)) if ip.FromPort != nil && *ip.FromPort > 0 { buf.WriteString(fmt.Sprintf("%d-", *ip.FromPort)) } @@ -373,7 +373,7 @@ func ipPermissionIDHash(sg_id, ruleType string, ip *ec2.IpPermission) string { } } - return fmt.Sprintf("sg-%d", hashcode.String(buf.String())) + return fmt.Sprintf("sgrule-%d", hashcode.String(buf.String())) } func expandIPPerm(d *schema.ResourceData, sg *ec2.SecurityGroup) *ec2.IpPermission { diff --git a/builtin/providers/aws/resource_aws_security_group_rule_migrate.go b/builtin/providers/aws/resource_aws_security_group_rule_migrate.go index 3dd6f5f72..0b57f3f17 100644 --- a/builtin/providers/aws/resource_aws_security_group_rule_migrate.go +++ b/builtin/providers/aws/resource_aws_security_group_rule_migrate.go @@ -17,12 +17,12 @@ func resourceAwsSecurityGroupRuleMigrateState( case 0: log.Println("[INFO] Found AWS Security Group State v0; migrating to v1") return migrateSGRuleStateV0toV1(is) - case 1: - log.Println("[INFO] Found AWS Security Group State v1; migrating to v2") - // migrating to version 2 of the schema is the same as 0->1, since the - // method signature has changed now and will use the security group id in - // the hash - return migrateSGRuleStateV0toV1(is) + case 1: + log.Println("[INFO] Found AWS Security Group State v1; migrating to v2") + // migrating to version 2 of the schema is the same as 0->1, since the + // method signature has changed now and will use the security group id in + // the hash + return migrateSGRuleStateV0toV1(is) default: return is, fmt.Errorf("Unexpected schema version: %d", v) } @@ -43,7 +43,7 @@ func migrateSGRuleStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceS } log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - newID := ipPermissionIDHash(is.Attributes["security_group_id"], is.Attributes["type"], perm) + newID := ipPermissionIDHash(is.Attributes["security_group_id"], is.Attributes["type"], perm) is.Attributes["id"] = newID is.ID = newID log.Printf("[DEBUG] Attributes after migration: 
%#v, new id: %s", is.Attributes, newID) diff --git a/builtin/providers/aws/resource_aws_security_group_rule_migrate_test.go b/builtin/providers/aws/resource_aws_security_group_rule_migrate_test.go index f9352fa27..87e3a1d63 100644 --- a/builtin/providers/aws/resource_aws_security_group_rule_migrate_test.go +++ b/builtin/providers/aws/resource_aws_security_group_rule_migrate_test.go @@ -27,7 +27,7 @@ func TestAWSSecurityGroupRuleMigrateState(t *testing.T) { "from_port": "0", "source_security_group_id": "sg-11877275", }, - Expected: "sg-2889201120", + Expected: "sg-2889201120", }, "v0_2": { StateVersion: 0, @@ -44,7 +44,7 @@ func TestAWSSecurityGroupRuleMigrateState(t *testing.T) { "cidr_blocks.2": "172.16.3.0/24", "cidr_blocks.3": "172.16.4.0/24", "cidr_blocks.#": "4"}, - Expected: "sg-1826358977", + Expected: "sg-1826358977", }, } diff --git a/builtin/providers/aws/resource_aws_security_group_rule_test.go b/builtin/providers/aws/resource_aws_security_group_rule_test.go index 29e831446..f06dd3e13 100644 --- a/builtin/providers/aws/resource_aws_security_group_rule_test.go +++ b/builtin/providers/aws/resource_aws_security_group_rule_test.go @@ -2,7 +2,7 @@ package aws import ( "fmt" - "log" + "log" "testing" "github.com/aws/aws-sdk-go/aws" @@ -90,15 +90,15 @@ func TestIpPermissionIDHash(t *testing.T) { Type string Output string }{ - {simple, "ingress", "sg-3403497314"}, - {egress, "egress", "sg-1173186295"}, - {egress_all, "egress", "sg-766323498"}, - {vpc_security_group_source, "egress", "sg-351225364"}, - {security_group_source, "egress", "sg-2198807188"}, + {simple, "ingress", "sgrule-3403497314"}, + {egress, "egress", "sgrule-1173186295"}, + {egress_all, "egress", "sgrule-766323498"}, + {vpc_security_group_source, "egress", "sgrule-351225364"}, + {security_group_source, "egress", "sgrule-2198807188"}, } for _, tc := range cases { - actual := ipPermissionIDHash("sg-12345", tc.Type, tc.Input) + actual := ipPermissionIDHash("sg-12345", tc.Type, tc.Input) if actual != tc.Output { t.Errorf("input: %s - %s\noutput: %s", tc.Type, tc.Input, actual) } @@ -132,7 +132,7 @@ func TestAccAWSSecurityGroupRule_Ingress_VPC(t *testing.T) { Config: testAccAWSSecurityGroupRuleIngressConfig, Check: resource.ComposeTestCheckFunc( testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), - testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.ingress_1", &group, nil, "ingress"), + testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.ingress_1", &group, nil, "ingress"), resource.TestCheckResourceAttr( "aws_security_group_rule.ingress_1", "from_port", "80"), testRuleCount, @@ -169,7 +169,7 @@ func TestAccAWSSecurityGroupRule_Ingress_Classic(t *testing.T) { Config: testAccAWSSecurityGroupRuleIngressClassicConfig, Check: resource.ComposeTestCheckFunc( testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), - testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.ingress_1", &group, nil, "ingress"), + testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.ingress_1", &group, nil, "ingress"), resource.TestCheckResourceAttr( "aws_security_group_rule.ingress_1", "from_port", "80"), testRuleCount, @@ -231,7 +231,7 @@ func TestAccAWSSecurityGroupRule_Egress(t *testing.T) { Config: testAccAWSSecurityGroupRuleEgressConfig, Check: resource.ComposeTestCheckFunc( testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), - testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.egress_1", &group, nil, "egress"), 
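+					// A nil expected permission makes the attributes check fall back
+					// to its default rule (tcp, ports 80-8000, cidr 10.0.0.0/8).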
+ testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.egress_1", &group, nil, "egress"), ), }, }, @@ -258,87 +258,87 @@ func TestAccAWSSecurityGroupRule_SelfReference(t *testing.T) { // testing partial match implementation func TestAccAWSSecurityGroupRule_PartialMatching_basic(t *testing.T) { - var group ec2.SecurityGroup + var group ec2.SecurityGroup - p := ec2.IpPermission{ - FromPort: aws.Int64(80), - ToPort: aws.Int64(80), - IpProtocol: aws.String("tcp"), - IpRanges: []*ec2.IpRange{ - &ec2.IpRange{CidrIp: aws.String("10.0.2.0/24")}, - &ec2.IpRange{CidrIp: aws.String("10.0.3.0/24")}, - &ec2.IpRange{CidrIp: aws.String("10.0.4.0/24")}, - }, - } + p := ec2.IpPermission{ + FromPort: aws.Int64(80), + ToPort: aws.Int64(80), + IpProtocol: aws.String("tcp"), + IpRanges: []*ec2.IpRange{ + &ec2.IpRange{CidrIp: aws.String("10.0.2.0/24")}, + &ec2.IpRange{CidrIp: aws.String("10.0.3.0/24")}, + &ec2.IpRange{CidrIp: aws.String("10.0.4.0/24")}, + }, + } - o := ec2.IpPermission{ - FromPort: aws.Int64(80), - ToPort: aws.Int64(80), - IpProtocol: aws.String("tcp"), - IpRanges: []*ec2.IpRange{ - &ec2.IpRange{CidrIp: aws.String("10.0.5.0/24")}, - }, - } + o := ec2.IpPermission{ + FromPort: aws.Int64(80), + ToPort: aws.Int64(80), + IpProtocol: aws.String("tcp"), + IpRanges: []*ec2.IpRange{ + &ec2.IpRange{CidrIp: aws.String("10.0.5.0/24")}, + }, + } - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSSecurityGroupRulePartialMatching, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), - testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.ingress", &group, &p, "ingress"), - testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.other", &group, &o, "ingress"), - testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.nat_ingress", &group, &o, "ingress"), - ), - }, - }, - }) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSecurityGroupRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSSecurityGroupRulePartialMatching, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), + testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.ingress", &group, &p, "ingress"), + testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.other", &group, &o, "ingress"), + testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.nat_ingress", &group, &o, "ingress"), + ), + }, + }, + }) } func TestAccAWSSecurityGroupRule_PartialMatching_Source(t *testing.T) { - var group ec2.SecurityGroup - var nat ec2.SecurityGroup - var p ec2.IpPermission + var group ec2.SecurityGroup + var nat ec2.SecurityGroup + var p ec2.IpPermission - // This function creates the expected IPPermission with the group id from an - // external security group, needed because Security Group IDs are generated on - // AWS side and can't be known ahead of time. 
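+	// ComposeTestCheckFunc runs its checks in order, so setupSG fills in `p`
+	// before testAccCheckAWSSecurityGroupRuleAttributes compares it against
+	// the live rules.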
- setupSG := func(*terraform.State) error { - if nat.GroupId == nil { - return fmt.Errorf("Error: nat group has nil GroupID") - } + // This function creates the expected IPPermission with the group id from an + // external security group, needed because Security Group IDs are generated on + // AWS side and can't be known ahead of time. + setupSG := func(*terraform.State) error { + if nat.GroupId == nil { + return fmt.Errorf("Error: nat group has nil GroupID") + } - p = ec2.IpPermission{ - FromPort: aws.Int64(80), - ToPort: aws.Int64(80), - IpProtocol: aws.String("tcp"), - UserIdGroupPairs: []*ec2.UserIdGroupPair{ - &ec2.UserIdGroupPair{GroupId: nat.GroupId}, - }, - } + p = ec2.IpPermission{ + FromPort: aws.Int64(80), + ToPort: aws.Int64(80), + IpProtocol: aws.String("tcp"), + UserIdGroupPairs: []*ec2.UserIdGroupPair{ + &ec2.UserIdGroupPair{GroupId: nat.GroupId}, + }, + } - return nil - } + return nil + } - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSSecurityGroupRulePartialMatching_Source, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), - testAccCheckAWSSecurityGroupRuleExists("aws_security_group.nat", &nat), - setupSG, - testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.source_ingress", &group, &p, "ingress"), - ), - }, - }, - }) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSecurityGroupRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSSecurityGroupRulePartialMatching_Source, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), + testAccCheckAWSSecurityGroupRuleExists("aws_security_group.nat", &nat), + setupSG, + testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.source_ingress", &group, &p, "ingress"), + ), + }, + }, + }) } @@ -407,25 +407,25 @@ func testAccCheckAWSSecurityGroupRuleExists(n string, group *ec2.SecurityGroup) func testAccCheckAWSSecurityGroupRuleAttributes(n string, group *ec2.SecurityGroup, p *ec2.IpPermission, ruleType string) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Security Group Rule Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Security Group Rule is set") + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Security Group Rule Not found: %s", n) } - if p == nil { - p = &ec2.IpPermission{ - FromPort: aws.Int64(80), - ToPort: aws.Int64(8000), - IpProtocol: aws.String("tcp"), - IpRanges: []*ec2.IpRange{&ec2.IpRange{CidrIp: aws.String("10.0.0.0/8")}}, - } - } + if rs.Primary.ID == "" { + return fmt.Errorf("No Security Group Rule is set") + } - var matchingRule *ec2.IpPermission + if p == nil { + p = &ec2.IpPermission{ + FromPort: aws.Int64(80), + ToPort: aws.Int64(8000), + IpProtocol: aws.String("tcp"), + IpRanges: []*ec2.IpRange{&ec2.IpRange{CidrIp: aws.String("10.0.0.0/8")}}, + } + } + + var matchingRule *ec2.IpPermission var rules []*ec2.IpPermission if ruleType == "ingress" { rules = group.IpPermissions @@ -437,53 +437,53 @@ func testAccCheckAWSSecurityGroupRuleAttributes(n string, group *ec2.SecurityGro return fmt.Errorf("No IPPerms") } - 
for _, r := range rules { - if r.ToPort != nil && *p.ToPort != *r.ToPort { - continue - } + for _, r := range rules { + if r.ToPort != nil && *p.ToPort != *r.ToPort { + continue + } - if r.FromPort != nil && *p.FromPort != *r.FromPort { - continue - } + if r.FromPort != nil && *p.FromPort != *r.FromPort { + continue + } - if r.IpProtocol != nil && *p.IpProtocol != *r.IpProtocol { - continue - } + if r.IpProtocol != nil && *p.IpProtocol != *r.IpProtocol { + continue + } - remaining := len(p.IpRanges) - for _, ip := range p.IpRanges { - for _, rip := range r.IpRanges { - if *ip.CidrIp == *rip.CidrIp { - remaining-- - } - } - } + remaining := len(p.IpRanges) + for _, ip := range p.IpRanges { + for _, rip := range r.IpRanges { + if *ip.CidrIp == *rip.CidrIp { + remaining-- + } + } + } - if remaining > 0 { - continue - } + if remaining > 0 { + continue + } - remaining = len(p.UserIdGroupPairs) - for _, ip := range p.UserIdGroupPairs { - for _, rip := range r.UserIdGroupPairs { - if *ip.GroupId == *rip.GroupId { - remaining-- - } - } - } + remaining = len(p.UserIdGroupPairs) + for _, ip := range p.UserIdGroupPairs { + for _, rip := range r.UserIdGroupPairs { + if *ip.GroupId == *rip.GroupId { + remaining-- + } + } + } - if remaining > 0 { - continue - } - matchingRule = r + if remaining > 0 { + continue + } + matchingRule = r } - if matchingRule != nil { - log.Printf("[DEBUG] Matching rule found : %s", matchingRule) - return nil - } + if matchingRule != nil { + log.Printf("[DEBUG] Matching rule found : %s", matchingRule) + return nil + } - return fmt.Errorf("Error here\n\tlooking for %s, wasn't found in %s", p, rules) + return fmt.Errorf("Error here\n\tlooking for %s, wasn't found in %s", p, rules) } } From 8d84369738340701912a538c07d1b7926ef5c695 Mon Sep 17 00:00:00 2001 From: Clint Date: Mon, 12 Oct 2015 16:03:43 -0500 Subject: [PATCH 189/220] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4ca088c72..5bc6260f5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -68,6 +68,7 @@ BUG FIXES: * provider/aws: Allow `weight = 0` in Route53 records [GH-3196] * provider/aws: Normalize aws_elasticache_cluster id to lowercase, allowing convergence. 
[GH-3235] * provider/aws: Fix ValidateAccountId for IAM Instance Profiles [GH-3313] + * provider/aws: Update Security Group Rules to Version 2 [GH-3019] * provider/docker: Fix issue preventing private images from being referenced [GH-2619] * provider/digitalocean: Fix issue causing unnecessary diffs based on droplet slugsize case [GH-3284] * provider/openstack: add state 'downloading' to list of expected states in From 31b8f04bda7234c37bd01589bdb5f0c3d7a10c3c Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Fri, 9 Oct 2015 11:49:36 -0500 Subject: [PATCH 190/220] provider/aws: Migrate KeyPair to version 1 --- .../providers/aws/resource_aws_key_pair.go | 13 +++++ .../aws/resource_aws_key_pair_migrate.go | 38 +++++++++++++ .../aws/resource_aws_key_pair_migrate_test.go | 55 +++++++++++++++++++ 3 files changed, 106 insertions(+) create mode 100644 builtin/providers/aws/resource_aws_key_pair_migrate.go create mode 100644 builtin/providers/aws/resource_aws_key_pair_migrate_test.go diff --git a/builtin/providers/aws/resource_aws_key_pair.go b/builtin/providers/aws/resource_aws_key_pair.go index e747fbfc5..0d6c51fcf 100644 --- a/builtin/providers/aws/resource_aws_key_pair.go +++ b/builtin/providers/aws/resource_aws_key_pair.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "strings" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" @@ -18,6 +19,9 @@ func resourceAwsKeyPair() *schema.Resource { Update: nil, Delete: resourceAwsKeyPairDelete, + SchemaVersion: 1, + MigrateState: resourceAwsKeyPairMigrateState, + Schema: map[string]*schema.Schema{ "key_name": &schema.Schema{ Type: schema.TypeString, @@ -29,6 +33,14 @@ func resourceAwsKeyPair() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, + StateFunc: func(v interface{}) string { + switch v.(type) { + case string: + return strings.TrimSpace(v.(string)) + default: + return "" + } + }, }, "fingerprint": &schema.Schema{ Type: schema.TypeString, @@ -45,6 +57,7 @@ func resourceAwsKeyPairCreate(d *schema.ResourceData, meta interface{}) error { if keyName == "" { keyName = resource.UniqueId() } + publicKey := d.Get("public_key").(string) req := &ec2.ImportKeyPairInput{ KeyName: aws.String(keyName), diff --git a/builtin/providers/aws/resource_aws_key_pair_migrate.go b/builtin/providers/aws/resource_aws_key_pair_migrate.go new file mode 100644 index 000000000..0d56123aa --- /dev/null +++ b/builtin/providers/aws/resource_aws_key_pair_migrate.go @@ -0,0 +1,38 @@ +package aws + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform/terraform" +) + +func resourceAwsKeyPairMigrateState( + v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + switch v { + case 0: + log.Println("[INFO] Found AWS Key Pair State v0; migrating to v1") + return migrateKeyPairStateV0toV1(is) + default: + return is, fmt.Errorf("Unexpected schema version: %d", v) + } + + return is, nil +} + +func migrateKeyPairStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { + if is.Empty() { + log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") + return is, nil + } + + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + + // replace public_key with a stripped version, removing `\n` from the end + // see https://github.com/hashicorp/terraform/issues/3455 + is.Attributes["public_key"] = strings.TrimSpace(is.Attributes["public_key"]) + + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil 
+} diff --git a/builtin/providers/aws/resource_aws_key_pair_migrate_test.go b/builtin/providers/aws/resource_aws_key_pair_migrate_test.go new file mode 100644 index 000000000..825d3c40f --- /dev/null +++ b/builtin/providers/aws/resource_aws_key_pair_migrate_test.go @@ -0,0 +1,55 @@ +package aws + +import ( + "testing" + + "github.com/hashicorp/terraform/terraform" +) + +func TestAWSKeyPairMigrateState(t *testing.T) { + cases := map[string]struct { + StateVersion int + ID string + Attributes map[string]string + Expected string + Meta interface{} + }{ + "v0_1": { + StateVersion: 0, + ID: "tf-testing-file", + Attributes: map[string]string{ + "fingerprint": "1d:cd:46:31:a9:4a:e0:06:8a:a1:22:cb:3b:bf:8e:42", + "key_name": "tf-testing-file", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA4LBtwcFsQAYWw1cnOwRTZCJCzPSzq0dl3== ctshryock", + }, + Expected: "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA4LBtwcFsQAYWw1cnOwRTZCJCzPSzq0dl3== ctshryock", + }, + "v0_2": { + StateVersion: 0, + ID: "tf-testing-file", + Attributes: map[string]string{ + "fingerprint": "1d:cd:46:31:a9:4a:e0:06:8a:a1:22:cb:3b:bf:8e:42", + "key_name": "tf-testing-file", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA4LBtwcFsQAYWw1cnOwRTZCJCzPSzq0dl3== ctshryock\n", + }, + Expected: "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA4LBtwcFsQAYWw1cnOwRTZCJCzPSzq0dl3== ctshryock", + }, + } + + for tn, tc := range cases { + is := &terraform.InstanceState{ + ID: tc.ID, + Attributes: tc.Attributes, + } + is, err := resourceAwsKeyPairMigrateState( + tc.StateVersion, is, tc.Meta) + + if err != nil { + t.Fatalf("bad: %s, err: %#v", tn, err) + } + + if is.Attributes["public_key"] != tc.Expected { + t.Fatalf("Bad public_key migration: %s\n\n expected: %s", is.Attributes["public_key"], tc.Expected) + } + } +} From c44e9d10a48994d0b2136592f420e275131b2384 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Mon, 12 Oct 2015 16:26:49 -0500 Subject: [PATCH 191/220] update migration test --- .../aws/resource_aws_security_group_rule_migrate_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/builtin/providers/aws/resource_aws_security_group_rule_migrate_test.go b/builtin/providers/aws/resource_aws_security_group_rule_migrate_test.go index 87e3a1d63..496834b8c 100644 --- a/builtin/providers/aws/resource_aws_security_group_rule_migrate_test.go +++ b/builtin/providers/aws/resource_aws_security_group_rule_migrate_test.go @@ -27,7 +27,7 @@ func TestAWSSecurityGroupRuleMigrateState(t *testing.T) { "from_port": "0", "source_security_group_id": "sg-11877275", }, - Expected: "sg-2889201120", + Expected: "sgrule-2889201120", }, "v0_2": { StateVersion: 0, @@ -44,7 +44,7 @@ func TestAWSSecurityGroupRuleMigrateState(t *testing.T) { "cidr_blocks.2": "172.16.3.0/24", "cidr_blocks.3": "172.16.4.0/24", "cidr_blocks.#": "4"}, - Expected: "sg-1826358977", + Expected: "sgrule-1826358977", }, } From 307902ec2d8ec5728fdbc3e135e38343775ded48 Mon Sep 17 00:00:00 2001 From: Clint Date: Mon, 12 Oct 2015 16:34:16 -0500 Subject: [PATCH 192/220] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5bc6260f5..119e027b3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -69,6 +69,7 @@ BUG FIXES: * provider/aws: Normalize aws_elasticache_cluster id to lowercase, allowing convergence. 
[GH-3235] * provider/aws: Fix ValidateAccountId for IAM Instance Profiles [GH-3313] * provider/aws: Update Security Group Rules to Version 2 [GH-3019] + * provider/aws: Migrate KeyPair to version 1, fixing issue with using `file()` [GH-3470] * provider/docker: Fix issue preventing private images from being referenced [GH-2619] * provider/digitalocean: Fix issue causing unnecessary diffs based on droplet slugsize case [GH-3284] * provider/openstack: add state 'downloading' to list of expected states in From a811a72f11e327dde17a8c5db06e5b44765da482 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Mon, 12 Oct 2015 15:50:07 -0500 Subject: [PATCH 193/220] provider/aws: fix force_delete on ASGs The `ForceDelete` parameter was getting sent to the upstream API call, but only after we had already finished draining instances from Terraform, so it was a moot point by then. This fixes that by skipping the drain when force_delete is true, and it also simplifies the field config a bit: * set a default of false to simplify the logic * remove `ForceNew` since there's no need to replace the resource to flip this value * pull a detail comment from code into the docs --- .../aws/resource_aws_autoscaling_group.go | 20 +++++++++---------- .../aws/r/autoscaling_group.html.markdown | 5 ++++- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/builtin/providers/aws/resource_aws_autoscaling_group.go b/builtin/providers/aws/resource_aws_autoscaling_group.go index 771bda2e3..e6d62b61a 100644 --- a/builtin/providers/aws/resource_aws_autoscaling_group.go +++ b/builtin/providers/aws/resource_aws_autoscaling_group.go @@ -73,8 +73,7 @@ func resourceAwsAutoscalingGroup() *schema.Resource { "force_delete": &schema.Schema{ Type: schema.TypeBool, Optional: true, - Computed: true, - ForceNew: true, + Default: false, }, "health_check_grace_period": &schema.Schema{ @@ -334,15 +333,9 @@ func resourceAwsAutoscalingGroupDelete(d *schema.ResourceData, meta interface{}) } log.Printf("[DEBUG] AutoScaling Group destroy: %v", d.Id()) - deleteopts := autoscaling.DeleteAutoScalingGroupInput{AutoScalingGroupName: aws.String(d.Id())} - - // You can force an autoscaling group to delete - // even if it's in the process of scaling a resource. - // Normally, you would set the min-size and max-size to 0,0 - // and then delete the group. This bypasses that and leaves - // resources potentially dangling. 
- if d.Get("force_delete").(bool) { - deleteopts.ForceDelete = aws.Bool(true) + deleteopts := autoscaling.DeleteAutoScalingGroupInput{ + AutoScalingGroupName: aws.String(d.Id()), + ForceDelete: aws.Bool(d.Get("force_delete").(bool)), } // We retry the delete operation to handle InUse/InProgress errors coming @@ -414,6 +407,11 @@ func getAwsAutoscalingGroup( func resourceAwsAutoscalingGroupDrain(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).autoscalingconn + if d.Get("force_delete").(bool) { + log.Printf("[DEBUG] Skipping ASG drain, force_delete was set.") + return nil + } + // First, set the capacity to zero so the group will drain log.Printf("[DEBUG] Reducing autoscaling group capacity to zero") opts := autoscaling.UpdateAutoScalingGroupInput{ diff --git a/website/source/docs/providers/aws/r/autoscaling_group.html.markdown b/website/source/docs/providers/aws/r/autoscaling_group.html.markdown index 022b1cf71..40831e99a 100644 --- a/website/source/docs/providers/aws/r/autoscaling_group.html.markdown +++ b/website/source/docs/providers/aws/r/autoscaling_group.html.markdown @@ -57,7 +57,10 @@ The following arguments are supported: for this number of healthy instances all attached load balancers. (See also [Waiting for Capacity](#waiting-for-capacity) below.) * `force_delete` - (Optional) Allows deleting the autoscaling group without waiting - for all instances in the pool to terminate. + for all instances in the pool to terminate. You can force an autoscaling group to delete + even if it's in the process of scaling a resource. Normally, Terraform + drains all the instances before deleting the group. This bypasses that + behavior and potentially leaves resources dangling. * `load_balancers` (Optional) A list of load balancer names to add to the autoscaling group names. * `vpc_zone_identifier` (Optional) A list of subnet IDs to launch resources in. From 7549872780675a25474fbad4a9cab5746a032706 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Mon, 12 Oct 2015 17:20:05 -0500 Subject: [PATCH 194/220] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 119e027b3..a61e8af63 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -70,6 +70,7 @@ BUG FIXES: * provider/aws: Fix ValidateAccountId for IAM Instance Profiles [GH-3313] * provider/aws: Update Security Group Rules to Version 2 [GH-3019] * provider/aws: Migrate KeyPair to version 1, fixing issue with using `file()` [GH-3470] + * provider/aws: Fix force_delete on autoscaling groups [GH-3485] * provider/docker: Fix issue preventing private images from being referenced [GH-2619] * provider/digitalocean: Fix issue causing unnecessary diffs based on droplet slugsize case [GH-3284] * provider/openstack: add state 'downloading' to list of expected states in From aaac9435ecf4fa139e5160c2e39295ccc5d223d8 Mon Sep 17 00:00:00 2001 From: Geert Theys Date: Tue, 13 Oct 2015 12:57:22 +0200 Subject: [PATCH 195/220] fix illegal char in the policy name aws_lb_cookie_stickiness_policy.elbland: Error creating LBCookieStickinessPolicy: ValidationError: Policy name cannot contain characters that are not letters, or digits or the dash. 
--- .../providers/aws/r/lb_cookie_stickiness_policy.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/providers/aws/r/lb_cookie_stickiness_policy.html.markdown b/website/source/docs/providers/aws/r/lb_cookie_stickiness_policy.html.markdown index bb4ad524e..59e581c12 100644 --- a/website/source/docs/providers/aws/r/lb_cookie_stickiness_policy.html.markdown +++ b/website/source/docs/providers/aws/r/lb_cookie_stickiness_policy.html.markdown @@ -25,7 +25,7 @@ resource "aws_elb" "lb" { } resource "aws_lb_cookie_stickiness_policy" "foo" { - name = "foo_policy" + name = "foo-policy" load_balancer = "${aws_elb.lb.id}" lb_port = 80 cookie_expiration_period = 600 From 60b7037cdd9e6d903a662f3dbdf4c48b0b58dcc5 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Tue, 13 Oct 2015 06:20:46 -0500 Subject: [PATCH 196/220] provider/aws: Additional error checking to VPC Peering conn --- .../aws/resource_aws_vpc_peering_connection.go | 12 +++++++----- .../aws/resource_aws_vpc_peering_connection_test.go | 1 + 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/builtin/providers/aws/resource_aws_vpc_peering_connection.go b/builtin/providers/aws/resource_aws_vpc_peering_connection.go index b279797f6..6b7c4dc52 100644 --- a/builtin/providers/aws/resource_aws_vpc_peering_connection.go +++ b/builtin/providers/aws/resource_aws_vpc_peering_connection.go @@ -127,6 +127,9 @@ func resourceVPCPeeringConnectionAccept(conn *ec2.EC2, id string) (string, error } resp, err := conn.AcceptVpcPeeringConnection(req) + if err != nil { + return "", err + } pc := resp.VpcPeeringConnection return *pc.Status.Code, err } @@ -153,16 +156,15 @@ func resourceAwsVPCPeeringUpdate(d *schema.ResourceData, meta interface{}) error } pc := pcRaw.(*ec2.VpcPeeringConnection) - if *pc.Status.Code == "pending-acceptance" { + if pc.Status != nil && *pc.Status.Code == "pending-acceptance" { status, err := resourceVPCPeeringConnectionAccept(conn, d.Id()) - - log.Printf( - "[DEBUG] VPC Peering connection accept status %s", - status) if err != nil { return err } + log.Printf( + "[DEBUG] VPC Peering connection accept status: %s", + status) } } diff --git a/builtin/providers/aws/resource_aws_vpc_peering_connection_test.go b/builtin/providers/aws/resource_aws_vpc_peering_connection_test.go index dc78a7082..8f7360250 100644 --- a/builtin/providers/aws/resource_aws_vpc_peering_connection_test.go +++ b/builtin/providers/aws/resource_aws_vpc_peering_connection_test.go @@ -117,6 +117,7 @@ resource "aws_vpc" "bar" { resource "aws_vpc_peering_connection" "foo" { vpc_id = "${aws_vpc.foo.id}" peer_vpc_id = "${aws_vpc.bar.id}" + auto_accept = true } ` From 5266db31e26712f29d950abe46e22a9a925934d6 Mon Sep 17 00:00:00 2001 From: stack72 Date: Thu, 3 Sep 2015 22:57:56 +0100 Subject: [PATCH 197/220] Adding the ability to manage a glacier vault --- builtin/providers/aws/config.go | 5 + builtin/providers/aws/provider.go | 1 + .../aws/resource_aws_glacier_vault.go | 380 ++++++++++++++++++ 3 files changed, 386 insertions(+) create mode 100644 builtin/providers/aws/resource_aws_glacier_vault.go diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go index 5eac34e8a..f8f443b73 100644 --- a/builtin/providers/aws/config.go +++ b/builtin/providers/aws/config.go @@ -21,6 +21,7 @@ import ( "github.com/aws/aws-sdk-go/service/elasticache" elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice" "github.com/aws/aws-sdk-go/service/elb" + "github.com/aws/aws-sdk-go/service/glacier" 
"github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/kinesis" "github.com/aws/aws-sdk-go/service/lambda" @@ -67,6 +68,7 @@ type AWSClient struct { elasticacheconn *elasticache.ElastiCache lambdaconn *lambda.Lambda opsworksconn *opsworks.OpsWorks + glacierconn *glacier.Glacier } // Client configures and returns a fully initialized AWSClient @@ -184,6 +186,9 @@ func (c *Config) Client() (interface{}, error) { log.Println("[INFO] Initializing Directory Service connection") client.dsconn = directoryservice.New(awsConfig) + + log.Println("[INFO] Initializing Glacier connection") + client.glacierconn = glacier.New(awsConfig) } if len(errs) > 0 { diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index c740e4bc8..f73580d0f 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -187,6 +187,7 @@ func Provider() terraform.ResourceProvider { "aws_elasticsearch_domain": resourceAwsElasticSearchDomain(), "aws_elb": resourceAwsElb(), "aws_flow_log": resourceAwsFlowLog(), + "aws_glacier_vault": resourceAwsGlacierVault(), "aws_iam_access_key": resourceAwsIamAccessKey(), "aws_iam_group_policy": resourceAwsIamGroupPolicy(), "aws_iam_group": resourceAwsIamGroup(), diff --git a/builtin/providers/aws/resource_aws_glacier_vault.go b/builtin/providers/aws/resource_aws_glacier_vault.go new file mode 100644 index 000000000..b077a35cd --- /dev/null +++ b/builtin/providers/aws/resource_aws_glacier_vault.go @@ -0,0 +1,380 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/glacier" +) + +func resourceAwsGlacierVault() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsGlacierVaultCreate, + Read: resourceAwsGlacierVaultRead, + Update: resourceAwsGlacierVaultUpdate, + Delete: resourceAwsGlacierVaultDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(`^[.0-9A-Za-z-_]+$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "only alphanumeric characters, hyphens, underscores, and periods allowed in %q", k)) + } + if len(value) > 255 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 255 characters", k)) + } + return + }, + }, + + "location": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "arn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "access_policy": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + StateFunc: normalizeJson, + }, + + "notification": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "events": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "sns_topic": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsGlacierVaultCreate(d *schema.ResourceData, meta interface{}) error { + glacierconn := meta.(*AWSClient).glacierconn + + input := &glacier.CreateVaultInput{ + VaultName: aws.String(d.Get("name").(string)), + } + + out, err := glacierconn.CreateVault(input) + if err != nil { + return 
fmt.Errorf("Error creating Glacier Vault: %s", err) + } + + d.SetId(d.Get("name").(string)) + d.Set("location", *out.Location) + + return resourceAwsGlacierVaultUpdate(d, meta) +} + +func resourceAwsGlacierVaultUpdate(d *schema.ResourceData, meta interface{}) error { + glacierconn := meta.(*AWSClient).glacierconn + + if err := setGlacierVaultTags(glacierconn, d); err != nil { + return err + } + + if d.HasChange("access_policy") { + if err := resourceAwsGlacierVaultPolicyUpdate(glacierconn, d); err != nil { + return err + } + } + + if d.HasChange("notification") { + if err := resourceAwsGlacierVaultNotificationUpdate(glacierconn, d); err != nil { + return err + } + } + + return resourceAwsGlacierVaultRead(d, meta) +} + +func resourceAwsGlacierVaultRead(d *schema.ResourceData, meta interface{}) error { + glacierconn := meta.(*AWSClient).glacierconn + + input := &glacier.DescribeVaultInput{ + VaultName: aws.String(d.Id()), + } + + out, err := glacierconn.DescribeVault(input) + if err != nil { + return fmt.Errorf("Error reading Glacier Vault: %s", err.Error()) + } + + d.Set("arn", *out.VaultARN) + + tags, err := getGlacierVaultTags(glacierconn, d.Id()) + if err != nil { + return err + } + d.Set("tags", tags) + + log.Printf("[DEBUG] Getting the access_policy for Vault %s", d.Id()) + pol, err := glacierconn.GetVaultAccessPolicy(&glacier.GetVaultAccessPolicyInput{ + VaultName: aws.String(d.Id()), + }) + + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "ResourceNotFoundException" { + d.Set("access_policy", "") + } else if pol != nil { + d.Set("access_policy", normalizeJson(*pol.Policy.Policy)) + } else { + return err + } + + notifications, err := getGlacierVaultNotification(glacierconn, d.Id()) + if err != nil { + return err + } + d.Set("notification", notifications) + + return nil +} + +func resourceAwsGlacierVaultDelete(d *schema.ResourceData, meta interface{}) error { + glacierconn := meta.(*AWSClient).glacierconn + + log.Printf("[DEBUG] Glacier Delete Vault: %s", d.Id()) + _, err := glacierconn.DeleteVault(&glacier.DeleteVaultInput{ + VaultName: aws.String(d.Id()), + }) + if err != nil { + return fmt.Errorf("Error deleting Glacier Vault: %s", err.Error()) + } + return nil +} + +func resourceAwsGlacierVaultNotificationUpdate(glacierconn *glacier.Glacier, d *schema.ResourceData) error { + + if v, ok := d.GetOk("notification"); ok { + settings := v.([]interface{}) + + if len(settings) > 1 { + return fmt.Errorf("Only a single Notification setup is allowed for Glacier Vault") + } else if len(settings) == 1 { + s := settings[0].(map[string]interface{}) + var events []*string + for _, id := range s["events"].(*schema.Set).List() { + event := id.(string) + if event != "ArchiveRetrievalCompleted" && event != "InventoryRetrievalCompleted" { + return fmt.Errorf("Glacier Vault Notification Events can only be 'ArchiveRetrievalCompleted' or 'InventoryRetrievalCompleted'") + } else { + events = append(events, aws.String(event)) + } + } + + _, err := glacierconn.SetVaultNotifications(&glacier.SetVaultNotificationsInput{ + VaultName: aws.String(d.Id()), + VaultNotificationConfig: &glacier.VaultNotificationConfig{ + SNSTopic: aws.String(s["sns_topic"].(string)), + Events: events, + }, + }) + + if err != nil { + return fmt.Errorf("Error Updating Glacier Vault Notifications: %s", err.Error()) + } + } + } + + return nil +} + +func resourceAwsGlacierVaultPolicyUpdate(glacierconn *glacier.Glacier, d *schema.ResourceData) error { + vaultName := d.Id() + policyContents := d.Get("access_policy").(string) + 
+ policy := &glacier.VaultAccessPolicy{ + Policy: aws.String(policyContents), + } + + if policyContents != "" { + log.Printf("[DEBUG] Glacier Vault: %s, put policy", vaultName) + + _, err := glacierconn.SetVaultAccessPolicy(&glacier.SetVaultAccessPolicyInput{ + VaultName: aws.String(d.Id()), + Policy: policy, + }) + + if err != nil { + return fmt.Errorf("Error putting Glacier Vault policy: %s", err.Error()) + } + } else { + log.Printf("[DEBUG] Glacier Vault: %s, delete policy: %s", vaultName, policy) + _, err := glacierconn.DeleteVaultAccessPolicy(&glacier.DeleteVaultAccessPolicyInput{ + VaultName: aws.String(d.Id()), + }) + + if err != nil { + return fmt.Errorf("Error deleting Glacier Vault policy: %s", err.Error()) + } + } + + return nil +} + +func setGlacierVaultTags(conn *glacier.Glacier, d *schema.ResourceData) error { + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffGlacierVaultTags(mapGlacierVaultTags(o), mapGlacierVaultTags(n)) + + // Set tags + if len(remove) > 0 { + tagsToRemove := &glacier.RemoveTagsFromVaultInput{ + VaultName: aws.String(d.Id()), + TagKeys: glacierStringsToPointyString(remove), + } + + log.Printf("[DEBUG] Removing tags: from %s", d.Id()) + _, err := conn.RemoveTagsFromVault(tagsToRemove) + if err != nil { + return err + } + } + if len(create) > 0 { + tagsToAdd := &glacier.AddTagsToVaultInput{ + VaultName: aws.String(d.Id()), + Tags: glacierVaultTagsFromMap(create), + } + + log.Printf("[DEBUG] Creating tags: for %s", d.Id()) + _, err := conn.AddTagsToVault(tagsToAdd) + if err != nil { + return err + } + } + } + + return nil +} + +func mapGlacierVaultTags(m map[string]interface{}) map[string]string { + results := make(map[string]string) + for k, v := range m { + results[k] = v.(string) + } + + return results +} + +func diffGlacierVaultTags(oldTags, newTags map[string]string) (map[string]string, []string) { + + create := make(map[string]string) + for k, v := range newTags { + create[k] = v + } + + // Build the list of what to remove + var remove []string + for k, v := range oldTags { + old, ok := create[k] + if !ok || old != v { + // Delete it! 
+			remove = append(remove, k)
+		}
+	}
+
+	return create, remove
+}
+
+func getGlacierVaultTags(glacierconn *glacier.Glacier, vaultName string) (map[string]string, error) {
+	request := &glacier.ListTagsForVaultInput{
+		VaultName: aws.String(vaultName),
+	}
+
+	log.Printf("[DEBUG] Getting the tags: for %s", vaultName)
+	response, err := glacierconn.ListTagsForVault(request)
+	if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "NoSuchTagSet" {
+		return map[string]string{}, nil
+	} else if err != nil {
+		return nil, err
+	}
+
+	return glacierVaultTagsToMap(response.Tags), nil
+}
+
+func glacierVaultTagsToMap(responseTags map[string]*string) map[string]string {
+	results := make(map[string]string, len(responseTags))
+	for k, v := range responseTags {
+		results[k] = *v
+	}
+
+	return results
+}
+
+func glacierVaultTagsFromMap(responseTags map[string]string) map[string]*string {
+	results := make(map[string]*string, len(responseTags))
+	for k, v := range responseTags {
+		results[k] = aws.String(v)
+	}
+
+	return results
+}
+
+func glacierStringsToPointyString(s []string) []*string {
+	results := make([]*string, len(s))
+	for i, x := range s {
+		results[i] = aws.String(x)
+	}
+
+	return results
+}
+
+func glacierPointersToStringList(pointers []*string) []interface{} {
+	list := make([]interface{}, len(pointers))
+	for i, v := range pointers {
+		list[i] = *v
+	}
+	return list
+}
+
+func getGlacierVaultNotification(glacierconn *glacier.Glacier, vaultName string) ([]map[string]interface{}, error) {
+	request := &glacier.GetVaultNotificationsInput{
+		VaultName: aws.String(vaultName),
+	}
+
+	response, err := glacierconn.GetVaultNotifications(request)
+	if err != nil {
+		return nil, fmt.Errorf("Error reading Glacier Vault Notifications: %s", err.Error())
+	}
+
+	notifications := make(map[string]interface{}, 0)
+
+	log.Print("[DEBUG] Flattening Glacier Vault Notifications")
+
+	notifications["events"] = schema.NewSet(schema.HashString, glacierPointersToStringList(response.VaultNotificationConfig.Events))
+	notifications["sns_topic"] = *response.VaultNotificationConfig.SNSTopic
+
+	return []map[string]interface{}{notifications}, nil
+}

From 95d35ad77f5609a54f59b454b4860f3aa7dba33c Mon Sep 17 00:00:00 2001
From: stack72
Date: Tue, 15 Sep 2015 23:32:54 +0100
Subject: [PATCH 198/220] Adding the docs for the Glacier Vault resource

Updating the glacier docs to include a link to the AWS developer guide
---
 .../aws/r/glacier_vault.html.markdown         | 68 +++++++++++++++++++
 website/source/layouts/aws.erb                |  9 +++
 2 files changed, 77 insertions(+)
 create mode 100644 website/source/docs/providers/aws/r/glacier_vault.html.markdown

diff --git a/website/source/docs/providers/aws/r/glacier_vault.html.markdown b/website/source/docs/providers/aws/r/glacier_vault.html.markdown
new file mode 100644
index 000000000..ad7e2a6d1
--- /dev/null
+++ b/website/source/docs/providers/aws/r/glacier_vault.html.markdown
@@ -0,0 +1,68 @@
+---
+layout: "aws"
+page_title: "AWS: aws_glacier_vault"
+sidebar_current: "docs-aws-resource-glacier-vault"
+description: |-
+  Provides a Glacier Vault.
+---
+
+# aws\_glacier\_vault
+
+Provides a Glacier Vault Resource.
You can refer to the [Glacier Developer Guide](http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-vaults.html) for a full explanation of the Glacier Vault functionality + +## Example Usage + +``` +resource "aws_glacier_vault" "my_archive" { + name = "MyArchive" + + notification { + sns_topic = "arn:aws:sns:us-west-2:432981146916:MyArchiveTopic" + events = ["ArchiveRetrievalCompleted","InventoryRetrievalCompleted"] + } + + access_policy = < + > + Glacier Resources + + + > IAM Resources From 2a7b8be9f3aae116c5168f10aa19ea6c7273e643 Mon Sep 17 00:00:00 2001 From: stack72 Date: Thu, 17 Sep 2015 01:46:10 +0100 Subject: [PATCH 199/220] Gofmt of the aws glacier vault resource --- .../aws/resource_aws_glacier_vault.go | 29 +-- .../aws/resource_aws_glacier_vault_test.go | 175 ++++++++++++++++++ .../aws/r/glacier_vault.html.markdown | 19 +- 3 files changed, 206 insertions(+), 17 deletions(-) create mode 100644 builtin/providers/aws/resource_aws_glacier_vault_test.go diff --git a/builtin/providers/aws/resource_aws_glacier_vault.go b/builtin/providers/aws/resource_aws_glacier_vault.go index b077a35cd..21ac4d7cc 100644 --- a/builtin/providers/aws/resource_aws_glacier_vault.go +++ b/builtin/providers/aws/resource_aws_glacier_vault.go @@ -143,7 +143,7 @@ func resourceAwsGlacierVaultRead(d *schema.ResourceData, meta interface{}) error VaultName: aws.String(d.Id()), }) - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "ResourceNotFoundException" { + if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "ResourceNotFoundException" { d.Set("access_policy", "") } else if pol != nil { d.Set("access_policy", normalizeJson(*pol.Policy.Policy)) @@ -152,10 +152,13 @@ func resourceAwsGlacierVaultRead(d *schema.ResourceData, meta interface{}) error } notifications, err := getGlacierVaultNotification(glacierconn, d.Id()) - if err != nil { + if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "ResourceNotFoundException" { + d.Set("notification", "") + } else if pol != nil { + d.Set("notification", notifications) + } else { return err } - d.Set("notification", notifications) return nil } @@ -179,17 +182,12 @@ func resourceAwsGlacierVaultNotificationUpdate(glacierconn *glacier.Glacier, d * settings := v.([]interface{}) if len(settings) > 1 { - return fmt.Errorf("Only a single Notification setup is allowed for Glacier Vault") + return fmt.Errorf("Only a single Notification Block is allowed for Glacier Vault") } else if len(settings) == 1 { s := settings[0].(map[string]interface{}) var events []*string for _, id := range s["events"].(*schema.Set).List() { - event := id.(string) - if event != "ArchiveRetrievalCompleted" && event != "InventoryRetrievalCompleted" { - return fmt.Errorf("Glacier Vault Notification Events can only be 'ArchiveRetrievalCompleted' or 'InventoryRetrievalCompleted'") - } else { - events = append(events, aws.String(event)) - } + events = append(events, aws.String(id.(string))) } _, err := glacierconn.SetVaultNotifications(&glacier.SetVaultNotificationsInput{ @@ -204,6 +202,15 @@ func resourceAwsGlacierVaultNotificationUpdate(glacierconn *glacier.Glacier, d * return fmt.Errorf("Error Updating Glacier Vault Notifications: %s", err.Error()) } } + } else { + _, err := glacierconn.DeleteVaultNotifications(&glacier.DeleteVaultNotificationsInput{ + VaultName: aws.String(d.Id()), + }) + + if err != nil { + return fmt.Errorf("Error Removing Glacier Vault Notifications: %s", err.Error()) + } + } return nil @@ -315,7 +322,7 @@ func getGlacierVaultTags(glacierconn 
*glacier.Glacier, vaultName string) (map[st log.Printf("[DEBUG] Getting the tags: for %s", vaultName) response, err := glacierconn.ListTagsForVault(request) - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "NoSuchTagSet" { + if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "NoSuchTagSet" { return map[string]string{}, nil } else if err != nil { return nil, err diff --git a/builtin/providers/aws/resource_aws_glacier_vault_test.go b/builtin/providers/aws/resource_aws_glacier_vault_test.go new file mode 100644 index 000000000..fc5e150d9 --- /dev/null +++ b/builtin/providers/aws/resource_aws_glacier_vault_test.go @@ -0,0 +1,175 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/glacier" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSGlacierVault_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckGlacierVaultDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccGlacierVault_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckGlacierVaultExists("aws_glacier_vault.test"), + ), + }, + }, + }) +} + +func TestAccAWSGlacierVault_full(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckGlacierVaultDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccGlacierVault_full, + Check: resource.ComposeTestCheckFunc( + testAccCheckGlacierVaultExists("aws_glacier_vault.full"), + ), + }, + }, + }) +} + +func TestAccAWSGlacierVault_RemoveNotifications(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckGlacierVaultDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccGlacierVault_full, + Check: resource.ComposeTestCheckFunc( + testAccCheckGlacierVaultExists("aws_glacier_vault.full"), + ), + }, + resource.TestStep{ + Config: testAccGlacierVault_withoutNotification, + Check: resource.ComposeTestCheckFunc( + testAccCheckGlacierVaultExists("aws_glacier_vault.full"), + testAccCheckVaultNotificationsMissing("aws_glacier_vault.full"), + ), + }, + }, + }) +} + +func testAccCheckGlacierVaultExists(name string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + glacierconn := testAccProvider.Meta().(*AWSClient).glacierconn + out, err := glacierconn.DescribeVault(&glacier.DescribeVaultInput{ + VaultName: aws.String(rs.Primary.ID), + }) + + if err != nil { + return err + } + + if out.VaultARN == nil { + return fmt.Errorf("No Glacier Vault Found") + } + + if *out.VaultName != rs.Primary.ID { + return fmt.Errorf("Glacier Vault Mismatch - existing: %q, state: %q", + *out.VaultName, rs.Primary.ID) + } + + return nil + } +} + +func testAccCheckVaultNotificationsMissing(name string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + glacierconn := 
testAccProvider.Meta().(*AWSClient).glacierconn + out, err := glacierconn.GetVaultNotifications(&glacier.GetVaultNotificationsInput{ + VaultName: aws.String(rs.Primary.ID), + }) + + if awserr, ok := err.(awserr.Error); ok && awserr.Code() != "ResourceNotFoundException" { + return fmt.Errorf("Expected ResourceNotFoundException for Vault %s Notification Block but got %s", rs.Primary.ID, awserr.Code()) + } + + if out.VaultNotificationConfig != nil { + return fmt.Errorf("Vault Notification Block has been found for %s", rs.Primary.ID) + } + + return nil + } + +} + +func testAccCheckGlacierVaultDestroy(s *terraform.State) error { + if len(s.RootModule().Resources) > 0 { + return fmt.Errorf("Expected all resources to be gone, but found: %#v", + s.RootModule().Resources) + } + + return nil +} + +const testAccGlacierVault_basic = ` +resource "aws_glacier_vault" "test" { + name = "my_test_vault" +} +` + +const testAccGlacierVault_full = ` +resource "aws_sns_topic" "aws_sns_topic" { + name = "glacier-sns-topic" +} + +resource "aws_glacier_vault" "full" { + name = "my_test_vault" + notification { + sns_topic = "${aws_sns_topic.aws_sns_topic.arn}" + events = ["ArchiveRetrievalCompleted","InventoryRetrievalCompleted"] + } + tags { + Test="Test1" + } +} +` + +const testAccGlacierVault_withoutNotification = ` +resource "aws_sns_topic" "aws_sns_topic" { + name = "glacier-sns-topic" +} + +resource "aws_glacier_vault" "full" { + name = "my_test_vault" + tags { + Test="Test1" + } +} +` diff --git a/website/source/docs/providers/aws/r/glacier_vault.html.markdown b/website/source/docs/providers/aws/r/glacier_vault.html.markdown index ad7e2a6d1..920bee4f5 100644 --- a/website/source/docs/providers/aws/r/glacier_vault.html.markdown +++ b/website/source/docs/providers/aws/r/glacier_vault.html.markdown @@ -10,14 +10,21 @@ description: |- Provides a Glacier Vault Resource. You can refer to the [Glacier Developer Guide](http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-vaults.html) for a full explanation of the Glacier Vault functionality +~> **NOTE:** When trying to remove a Glacier Vault, the Vault must be empty. + ## Example Usage ``` + +resource "aws_sns_topic" "aws_sns_topic" { + name = "glacier-sns-topic" +} + resource "aws_glacier_vault" "my_archive" { name = "MyArchive" notification { - sns_topic = "arn:aws:sns:us-west-2:432981146916:MyArchiveTopic" + sns_topic = "${aws_sns_topic.aws_sns_topic.arn}" events = ["ArchiveRetrievalCompleted","InventoryRetrievalCompleted"] } @@ -51,15 +58,15 @@ EOF The following arguments are supported: -* `name` - (Required) The name of the Vault. Names can be between 1 and 255 characters long and the valid characters are a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), and '.' (period). -* `access_policy` - (Required) The policy document. This is a JSON formatted string. - The heredoc syntax or `file` function is helpful here. -* `notification` - (Required) The notifications for the Vault. Fields documented below. +* `name` - (Required) The name of the Vault. Names can be between 1 and 255 characters long and the valid characters are a-z, A-Z, 0-9, '\_' (underscore), '-' (hyphen), and '.' (period). +* `access_policy` - (Optional) The policy document. This is a JSON formatted string. + The heredoc syntax or `file` function is helpful here. Use the [Glacier Developer Guide](https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html) for more information on Glacier Vault Policy +* `notification` - (Optional) The notifications for the Vault. 
Fields documented below. * `tags` - (Optional) A mapping of tags to assign to the resource. **notification** supports the following: -* `events` - (Required) You can configure a vault to public a notification for `ArchiveRetrievalCompleted` and `InventoryRetrievalCompleted` events. +* `events` - (Required) You can configure a vault to publish a notification for `ArchiveRetrievalCompleted` and `InventoryRetrievalCompleted` events. * `sns_topic` - (Required) The SNS Topic ARN. The following attributes are exported: From 9f01efae6f027ce6cd646ebb7c018f95a410104d Mon Sep 17 00:00:00 2001 From: stack72 Date: Mon, 5 Oct 2015 11:24:09 +0100 Subject: [PATCH 200/220] Adding a test to make sure that the diffGlacierVaultTags func works as expected --- .../aws/resource_aws_glacier_vault_test.go | 52 +++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/builtin/providers/aws/resource_aws_glacier_vault_test.go b/builtin/providers/aws/resource_aws_glacier_vault_test.go index fc5e150d9..4f5c26bf2 100644 --- a/builtin/providers/aws/resource_aws_glacier_vault_test.go +++ b/builtin/providers/aws/resource_aws_glacier_vault_test.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "reflect" "testing" "github.com/aws/aws-sdk-go/aws" @@ -67,6 +68,57 @@ func TestAccAWSGlacierVault_RemoveNotifications(t *testing.T) { }) } +func TestDiffGlacierVaultTags(t *testing.T) { + cases := []struct { + Old, New map[string]interface{} + Create map[string]string + Remove []string + }{ + // Basic add/remove + { + Old: map[string]interface{}{ + "foo": "bar", + }, + New: map[string]interface{}{ + "bar": "baz", + }, + Create: map[string]string{ + "bar": "baz", + }, + Remove: []string{ + "foo", + }, + }, + + // Modify + { + Old: map[string]interface{}{ + "foo": "bar", + }, + New: map[string]interface{}{ + "foo": "baz", + }, + Create: map[string]string{ + "foo": "baz", + }, + Remove: []string{ + "foo", + }, + }, + } + + for i, tc := range cases { + c, r := diffGlacierVaultTags(mapGlacierVaultTags(tc.Old), mapGlacierVaultTags(tc.New)) + + if !reflect.DeepEqual(c, tc.Create) { + t.Fatalf("%d: bad create: %#v", i, c) + } + if !reflect.DeepEqual(r, tc.Remove) { + t.Fatalf("%d: bad remove: %#v", i, r) + } + } +} + func testAccCheckGlacierVaultExists(name string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[name] From 2f42f58256b0a6f4e6921f2d1f8a1e63a0545442 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Tue, 13 Oct 2015 17:15:34 +0200 Subject: [PATCH 201/220] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a61e8af63..cfe4dd75a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ FEATURES: * **New resource: `aws_directory_service_directory`** [GH-3228] * **New resource: `aws_autoscaling_lifecycle_hook`** [GH-3351] * **New resource: `aws_placement_group`** [GH-3457] + * **New resource: `aws_glacier_vault`** [GH-3491] IMPROVEMENTS: From 95832c2fb217182c16f856edbed2d51d8e07719b Mon Sep 17 00:00:00 2001 From: Clint Date: Tue, 13 Oct 2015 12:56:53 -0500 Subject: [PATCH 202/220] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index cfe4dd75a..9458c1a73 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -72,6 +72,7 @@ BUG FIXES: * provider/aws: Update Security Group Rules to Version 2 [GH-3019] * provider/aws: Migrate KeyPair to version 1, fixing issue with using `file()` [GH-3470] * provider/aws: Fix force_delete on autoscaling groups 
[GH-3485] + * provider/aws: Fix crash with VPC Peering connections [GH-3490] * provider/docker: Fix issue preventing private images from being referenced [GH-2619] * provider/digitalocean: Fix issue causing unnecessary diffs based on droplet slugsize case [GH-3284] * provider/openstack: add state 'downloading' to list of expected states in From 43c7711ac89172ac45b365be60c81d757f765532 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Tue, 13 Oct 2015 18:21:21 +0200 Subject: [PATCH 203/220] docs/aws: Fix whitespacing in glacier_vault --- .../docs/providers/aws/r/glacier_vault.html.markdown | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/website/source/docs/providers/aws/r/glacier_vault.html.markdown b/website/source/docs/providers/aws/r/glacier_vault.html.markdown index 920bee4f5..523260d72 100644 --- a/website/source/docs/providers/aws/r/glacier_vault.html.markdown +++ b/website/source/docs/providers/aws/r/glacier_vault.html.markdown @@ -10,24 +10,23 @@ description: |- Provides a Glacier Vault Resource. You can refer to the [Glacier Developer Guide](http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-vaults.html) for a full explanation of the Glacier Vault functionality -~> **NOTE:** When trying to remove a Glacier Vault, the Vault must be empty. +~> **NOTE:** When trying to remove a Glacier Vault, the Vault must be empty. ## Example Usage ``` - resource "aws_sns_topic" "aws_sns_topic" { name = "glacier-sns-topic" } resource "aws_glacier_vault" "my_archive" { name = "MyArchive" - + notification { sns_topic = "${aws_sns_topic.aws_sns_topic.arn}" events = ["ArchiveRetrievalCompleted","InventoryRetrievalCompleted"] } - + access_policy = < Date: Tue, 13 Oct 2015 22:52:11 +0200 Subject: [PATCH 204/220] docs: Make IAM policy doc canonical --- .../source/docs/providers/aws/r/glacier_vault.html.markdown | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/website/source/docs/providers/aws/r/glacier_vault.html.markdown b/website/source/docs/providers/aws/r/glacier_vault.html.markdown index 523260d72..6805338c7 100644 --- a/website/source/docs/providers/aws/r/glacier_vault.html.markdown +++ b/website/source/docs/providers/aws/r/glacier_vault.html.markdown @@ -39,9 +39,7 @@ resource "aws_glacier_vault" "my_archive" { "glacier:InitiateJob", "glacier:GetJobOutput" ], - "Resource": [ - "arn:aws:glacier:eu-west-1:432981146916:vaults/MyArchive" - ] + "Resource": "arn:aws:glacier:eu-west-1:432981146916:vaults/MyArchive" } ] } From ef5b6e93a9d399b1b54d6d6207d7ed44e13f44f0 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Tue, 13 Oct 2015 16:57:11 -0500 Subject: [PATCH 205/220] provider/azure: fix issues loading config from homedir Issues were: * `settings_file` `ValidateFunc` needs to expand homedir just like the `configure` does, otherwise ~-based paths fail validation * `isFile` was being called before ~-expand so configure was failing as well * `Config` was swallowing error so provider was ending up with `nil`, resulting in crash To fix: * Consolidate settings_file path/contents handling into a single helper called from both `validate` and `configure` funcs * Return err from `Config` To cover: * Added test case to validate w/ tilde-path * Added configure test w/ tilde-path --- builtin/providers/azure/config.go | 2 +- builtin/providers/azure/provider.go | 58 ++++++++-------- builtin/providers/azure/provider_test.go | 85 +++++++++++++++++++++++- 3 files changed, 111 insertions(+), 34 deletions(-) diff --git a/builtin/providers/azure/config.go 
b/builtin/providers/azure/config.go index cbb23d58b..b096a10c4 100644 --- a/builtin/providers/azure/config.go +++ b/builtin/providers/azure/config.go @@ -98,7 +98,7 @@ func (c Client) getStorageServiceQueueClient(serviceName string) (storage.QueueS func (c *Config) NewClientFromSettingsData() (*Client, error) { mc, err := management.ClientFromPublishSettingsData(c.Settings, c.SubscriptionID) if err != nil { - return nil, nil + return nil, err } return &Client{ diff --git a/builtin/providers/azure/provider.go b/builtin/providers/azure/provider.go index fe100be35..975a93b00 100644 --- a/builtin/providers/azure/provider.go +++ b/builtin/providers/azure/provider.go @@ -64,22 +64,12 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) { Certificate: []byte(d.Get("certificate").(string)), } - settings := d.Get("settings_file").(string) - - if settings != "" { - if ok, _ := isFile(settings); ok { - settingsFile, err := homedir.Expand(settings) - if err != nil { - return nil, fmt.Errorf("Error expanding the settings file path: %s", err) - } - publishSettingsContent, err := ioutil.ReadFile(settingsFile) - if err != nil { - return nil, fmt.Errorf("Error reading settings file: %s", err) - } - config.Settings = publishSettingsContent - } else { - config.Settings = []byte(settings) - } + settingsFile := d.Get("settings_file").(string) + if settingsFile != "" { + // any errors from readSettings would have been caught at the validate + // step, so we can avoid handling them now + settings, _, _ := readSettings(settingsFile) + config.Settings = settings return config.NewClientFromSettingsData() } @@ -92,31 +82,39 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) { "or both a 'subscription_id' and 'certificate'.") } -func validateSettingsFile(v interface{}, k string) (warnings []string, errors []error) { +func validateSettingsFile(v interface{}, k string) ([]string, []error) { value := v.(string) - if value == "" { - return + return nil, nil } - var settings settingsData - if err := xml.Unmarshal([]byte(value), &settings); err != nil { - warnings = append(warnings, ` + _, warnings, errors := readSettings(value) + return warnings, errors +} + +const settingsPathWarnMsg = ` settings_file is not valid XML, so we are assuming it is a file path. This support will be removed in the future. 
Please update your configuration to use -${file("filename.publishsettings")} instead.`) - } else { +${file("filename.publishsettings")} instead.` + +func readSettings(pathOrContents string) (s []byte, ws []string, es []error) { + var settings settingsData + if err := xml.Unmarshal([]byte(pathOrContents), &settings); err == nil { + s = []byte(pathOrContents) return } - if ok, err := isFile(value); !ok { - errors = append(errors, - fmt.Errorf( - "account_file path could not be read from '%s': %s", - value, - err)) + ws = append(ws, settingsPathWarnMsg) + path, err := homedir.Expand(pathOrContents) + if err != nil { + es = append(es, fmt.Errorf("Error expanding path: %s", err)) + return } + s, err = ioutil.ReadFile(path) + if err != nil { + es = append(es, fmt.Errorf("Could not read file '%s': %s", path, err)) + } return } diff --git a/builtin/providers/azure/provider_test.go b/builtin/providers/azure/provider_test.go index 5c720640f..b3feb8392 100644 --- a/builtin/providers/azure/provider_test.go +++ b/builtin/providers/azure/provider_test.go @@ -3,12 +3,14 @@ package azure import ( "io" "io/ioutil" - "log" "os" + "strings" "testing" + "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" + "github.com/mitchellh/go-homedir" ) var testAccProviders map[string]terraform.ResourceProvider @@ -67,20 +69,33 @@ func TestAzure_validateSettingsFile(t *testing.T) { if err != nil { t.Fatalf("Error creating temporary file in TestAzure_validateSettingsFile: %s", err) } + defer os.Remove(f.Name()) fx, err := ioutil.TempFile("", "tf-test-xml") if err != nil { t.Fatalf("Error creating temporary file with XML in TestAzure_validateSettingsFile: %s", err) } + defer os.Remove(fx.Name()) + + home, err := homedir.Dir() + if err != nil { + t.Fatalf("Error fetching homedir: %s", err) + } + fh, err := ioutil.TempFile(home, "tf-test-home") + if err != nil { + t.Fatalf("Error creating homedir-based temporary file: %s", err) + } + defer os.Remove(fh.Name()) _, err = io.WriteString(fx, "") if err != nil { t.Fatalf("Error writing XML File: %s", err) } - - log.Printf("fx name: %s", fx.Name()) fx.Close() + r := strings.NewReplacer(home, "~") + homePath := r.Replace(fh.Name()) + cases := []struct { Input string // String of XML or a path to an XML file W int // expected count of warnings @@ -89,6 +104,7 @@ func TestAzure_validateSettingsFile(t *testing.T) { {"test", 1, 1}, {f.Name(), 1, 0}, {fx.Name(), 1, 0}, + {homePath, 1, 0}, {"", 0, 0}, } @@ -104,6 +120,53 @@ func TestAzure_validateSettingsFile(t *testing.T) { } } +func TestAzure_providerConfigure(t *testing.T) { + home, err := homedir.Dir() + if err != nil { + t.Fatalf("Error fetching homedir: %s", err) + } + fh, err := ioutil.TempFile(home, "tf-test-home") + if err != nil { + t.Fatalf("Error creating homedir-based temporary file: %s", err) + } + defer os.Remove(fh.Name()) + + _, err = io.WriteString(fh, testAzurePublishSettingsStr) + if err != nil { + t.Fatalf("err: %s", err) + } + fh.Close() + + r := strings.NewReplacer(home, "~") + homePath := r.Replace(fh.Name()) + + cases := []struct { + SettingsFile string // String of XML or a path to an XML file + NilMeta bool // whether meta is expected to be nil + }{ + {testAzurePublishSettingsStr, false}, + {homePath, false}, + } + + for _, tc := range cases { + rp := Provider() + raw := map[string]interface{}{ + "settings_file": tc.SettingsFile, + } + + rawConfig, err := config.NewRawConfig(raw) + if err != nil { + t.Fatalf("err: %s", err) + } + + 
err = rp.Configure(terraform.NewResourceConfig(rawConfig)) + meta := rp.(*schema.Provider).Meta() + if (meta == nil) != tc.NilMeta { + t.Fatalf("expected NilMeta: %t, got meta: %#v", tc.NilMeta, meta) + } + } +} + func TestAzure_isFile(t *testing.T) { f, err := ioutil.TempFile("", "tf-test-file") if err != nil { @@ -129,3 +192,19 @@ func TestAzure_isFile(t *testing.T) { } } } + +// testAzurePublishSettingsStr is a revoked publishsettings file +const testAzurePublishSettingsStr = ` + + + + + + +` From 8d017be63724fb6bc9b237de5196b9b88b3c4523 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Wed, 14 Oct 2015 10:35:40 -0500 Subject: [PATCH 206/220] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9458c1a73..6dfefc419 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -44,6 +44,7 @@ IMPROVEMENTS: * provider/aws: Add validation for `db_parameter_group.name` [GH-3279] * provider/aws: `aws_s3_bucket_object` allows interpolated content to be set with new `content` attribute. [GH-3200] * provider/aws: Allow tags for `aws_kinesis_stream` resource. [GH-3397] + * provider/aws: Configurable capacity waiting duration for ASGs [GH-3191] * provider/cloudstack: Add `project` parameter to `cloudstack_vpc`, `cloudstack_network`, `cloudstack_ipaddress` and `cloudstack_disk` [GH-3035] * provider/openstack: add functionality to attach FloatingIP to Port [GH-1788] * provider/google: Can now do multi-region deployments without using multiple providers [GH-3258] From 6d2fee9c28831251aace76ea8f5653ba0c5510b1 Mon Sep 17 00:00:00 2001 From: stack72 Date: Wed, 14 Oct 2015 18:06:09 +0100 Subject: [PATCH 207/220] After the DynamoDB table is created, the ARN wasn't being set --- builtin/providers/aws/resource_aws_dynamodb_table.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/builtin/providers/aws/resource_aws_dynamodb_table.go b/builtin/providers/aws/resource_aws_dynamodb_table.go index df043ffe0..b322ad897 100644 --- a/builtin/providers/aws/resource_aws_dynamodb_table.go +++ b/builtin/providers/aws/resource_aws_dynamodb_table.go @@ -287,6 +287,10 @@ func resourceAwsDynamoDbTableCreate(d *schema.ResourceData, meta interface{}) er } else { // No error, set ID and return d.SetId(*output.TableDescription.TableName) + if err := d.Set("arn", *output.TableDescription.TableArn); err != nil { + return err + } + return nil } } From 12625997c1124f5bc410d1d311d6b49909ead03f Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Fri, 25 Sep 2015 17:48:08 -0400 Subject: [PATCH 208/220] Added global address & tests --- builtin/providers/google/provider.go | 1 + .../google/resource_compute_global_address.go | 100 ++++++++++++++++++ .../resource_compute_global_address_test.go | 81 ++++++++++++++ .../r/compute_global_address.html.markdown | 37 +++++++ 4 files changed, 219 insertions(+) create mode 100644 builtin/providers/google/resource_compute_global_address.go create mode 100644 builtin/providers/google/resource_compute_global_address_test.go create mode 100644 website/source/docs/providers/google/r/compute_global_address.html.markdown diff --git a/builtin/providers/google/provider.go b/builtin/providers/google/provider.go index 7c9587219..87a299d81 100644 --- a/builtin/providers/google/provider.go +++ b/builtin/providers/google/provider.go @@ -40,6 +40,7 @@ func Provider() terraform.ResourceProvider { "google_compute_disk": resourceComputeDisk(), "google_compute_firewall": resourceComputeFirewall(), "google_compute_forwarding_rule": resourceComputeForwardingRule(), 
+ "google_compute_global_address": resourceComputeGlobalAddress(), "google_compute_http_health_check": resourceComputeHttpHealthCheck(), "google_compute_instance": resourceComputeInstance(), "google_compute_instance_template": resourceComputeInstanceTemplate(), diff --git a/builtin/providers/google/resource_compute_global_address.go b/builtin/providers/google/resource_compute_global_address.go new file mode 100644 index 000000000..0d19bdfcf --- /dev/null +++ b/builtin/providers/google/resource_compute_global_address.go @@ -0,0 +1,100 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +func resourceComputeGlobalAddress() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeGlobalAddressCreate, + Read: resourceComputeGlobalAddressRead, + Delete: resourceComputeGlobalAddressDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceComputeGlobalAddressCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Build the address parameter + addr := &compute.Address{Name: d.Get("name").(string)} + op, err := config.clientCompute.GlobalAddresses.Insert( + config.Project, addr).Do() + if err != nil { + return fmt.Errorf("Error creating address: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(addr.Name) + + err = resourceOperationWaitGlobal(config, op, "Creating Global Address") + if err != nil { + return err + } + + return resourceComputeGlobalAddressRead(d, meta) +} + +func resourceComputeGlobalAddressRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + addr, err := config.clientCompute.GlobalAddresses.Get( + config.Project, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading address: %s", err) + } + + d.Set("address", addr.Address) + d.Set("self_link", addr.SelfLink) + + return nil +} + +func resourceComputeGlobalAddressDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Delete the address + log.Printf("[DEBUG] address delete request") + op, err := config.clientCompute.GlobalAddresses.Delete( + config.Project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting address: %s", err) + } + + err = resourceOperationWaitGlobal(config, op, "Deletingg Global Address") + if err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/builtin/providers/google/resource_compute_global_address_test.go b/builtin/providers/google/resource_compute_global_address_test.go new file mode 100644 index 000000000..2ef7b97ea --- /dev/null +++ b/builtin/providers/google/resource_compute_global_address_test.go @@ -0,0 +1,81 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" +) + +func TestAccComputeGlobalAddress_basic(t *testing.T) { + var addr compute.Address + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: 
+		CheckDestroy: testAccCheckComputeGlobalAddressDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccComputeGlobalAddress_basic,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckComputeGlobalAddressExists(
+						"google_compute_global_address.foobar", &addr),
+				),
+			},
+		},
+	})
+}
+
+func testAccCheckComputeGlobalAddressDestroy(s *terraform.State) error {
+	config := testAccProvider.Meta().(*Config)
+
+	for _, rs := range s.RootModule().Resources {
+		if rs.Type != "google_compute_global_address" {
+			continue
+		}
+
+		_, err := config.clientCompute.GlobalAddresses.Get(
+			config.Project, rs.Primary.ID).Do()
+		if err == nil {
+			return fmt.Errorf("Address still exists")
+		}
+	}
+
+	return nil
+}
+
+func testAccCheckComputeGlobalAddressExists(n string, addr *compute.Address) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		rs, ok := s.RootModule().Resources[n]
+		if !ok {
+			return fmt.Errorf("Not found: %s", n)
+		}
+
+		if rs.Primary.ID == "" {
+			return fmt.Errorf("No ID is set")
+		}
+
+		config := testAccProvider.Meta().(*Config)
+
+		found, err := config.clientCompute.GlobalAddresses.Get(
+			config.Project, rs.Primary.ID).Do()
+		if err != nil {
+			return err
+		}
+
+		if found.Name != rs.Primary.ID {
+			return fmt.Errorf("Addr not found")
+		}
+
+		*addr = *found
+
+		return nil
+	}
+}
+
+const testAccComputeGlobalAddress_basic = `
+resource "google_compute_global_address" "foobar" {
+	name = "terraform-test"
+}`
diff --git a/website/source/docs/providers/google/r/compute_global_address.html.markdown b/website/source/docs/providers/google/r/compute_global_address.html.markdown
new file mode 100644
index 000000000..1fdb24e6d
--- /dev/null
+++ b/website/source/docs/providers/google/r/compute_global_address.html.markdown
@@ -0,0 +1,37 @@
+---
+layout: "google"
+page_title: "Google: google_compute_global_address"
+sidebar_current: "docs-google-resource-global-address"
+description: |-
+  Creates a static global IP address resource for a Google Compute Engine project.
+---
+
+# google\_compute\_global\_address
+
+Creates a static IP address resource global to a Google Compute Engine project. For more information see
+[the official documentation](https://cloud.google.com/compute/docs/instances-and-network) and
+[API](https://cloud.google.com/compute/docs/reference/latest/globalAddresses).
+
+
+## Example Usage
+
+```
+resource "google_compute_global_address" "default" {
+	name = "test-address"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) A unique name for the resource, required by GCE.
+    Changing this forces a new resource to be created.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `name` - The name of the resource.
+* `address` - The IP address that was allocated.
+* `self_link` - The URI of the created resource.
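The resource is a thin wrapper over the Compute Engine `GlobalAddresses` API, so an allocated address can also be inspected directly with the same client library the provider uses. A minimal sketch (assuming Application Default Credentials are available; the project and address names are placeholders):

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// Assumes Application Default Credentials (e.g. from gcloud auth).
	client, err := google.DefaultClient(ctx, compute.ComputeScope)
	if err != nil {
		log.Fatal(err)
	}

	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// "my-project" and "terraform-test" are placeholder values.
	addr, err := svc.GlobalAddresses.Get("my-project", "terraform-test").Do()
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(addr.Address, addr.SelfLink)
}
```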
From b7f7c7a7315a2e6c355f45c330c9d59ebc9c0e36 Mon Sep 17 00:00:00 2001
From: Lars Wander
Date: Wed, 14 Oct 2015 13:17:08 -0400
Subject: [PATCH 209/220] Provider GCE, fixed metadata state update bug

---
 builtin/providers/google/provider.go          |   1 -
 .../google/resource_compute_global_address.go | 100 ------------------
 .../resource_compute_global_address_test.go   |  81 --------------
 .../google/resource_compute_instance.go       |  18 +++-
 .../google/resource_compute_instance_test.go  |   2 +-
 .../r/compute_global_address.html.markdown    |  37 -------
 6 files changed, 18 insertions(+), 221 deletions(-)
 delete mode 100644 builtin/providers/google/resource_compute_global_address.go
 delete mode 100644 builtin/providers/google/resource_compute_global_address_test.go
 delete mode 100644 website/source/docs/providers/google/r/compute_global_address.html.markdown

diff --git a/builtin/providers/google/provider.go b/builtin/providers/google/provider.go
index 87a299d81..7c9587219 100644
--- a/builtin/providers/google/provider.go
+++ b/builtin/providers/google/provider.go
@@ -40,7 +40,6 @@ func Provider() terraform.ResourceProvider {
 			"google_compute_disk":               resourceComputeDisk(),
 			"google_compute_firewall":           resourceComputeFirewall(),
 			"google_compute_forwarding_rule":    resourceComputeForwardingRule(),
-			"google_compute_global_address":     resourceComputeGlobalAddress(),
 			"google_compute_http_health_check":  resourceComputeHttpHealthCheck(),
 			"google_compute_instance":           resourceComputeInstance(),
 			"google_compute_instance_template":  resourceComputeInstanceTemplate(),
diff --git a/builtin/providers/google/resource_compute_global_address.go b/builtin/providers/google/resource_compute_global_address.go
deleted file mode 100644
index 0d19bdfcf..000000000
--- a/builtin/providers/google/resource_compute_global_address.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package google
-
-import (
-    "fmt"
-    "log"
-
-    "github.com/hashicorp/terraform/helper/schema"
-    "google.golang.org/api/compute/v1"
-    "google.golang.org/api/googleapi"
-)
-
-func resourceComputeGlobalAddress() *schema.Resource {
-    return &schema.Resource{
-        Create: resourceComputeGlobalAddressCreate,
-        Read:   resourceComputeGlobalAddressRead,
-        Delete: resourceComputeGlobalAddressDelete,
-
-        Schema: map[string]*schema.Schema{
-            "name": &schema.Schema{
-                Type:     schema.TypeString,
-                Required: true,
-                ForceNew: true,
-            },
-
-            "address": &schema.Schema{
-                Type:     schema.TypeString,
-                Computed: true,
-            },
-
-            "self_link": &schema.Schema{
-                Type:     schema.TypeString,
-                Computed: true,
-            },
-        },
-    }
-}
-
-func resourceComputeGlobalAddressCreate(d *schema.ResourceData, meta interface{}) error {
-    config := meta.(*Config)
-
-    // Build the address parameter
-    addr := &compute.Address{Name: d.Get("name").(string)}
-    op, err := config.clientCompute.GlobalAddresses.Insert(
-        config.Project, addr).Do()
-    if err != nil {
-        return fmt.Errorf("Error creating address: %s", err)
-    }
-
-    // It probably maybe worked, so store the ID now
-    d.SetId(addr.Name)
-
-    err = resourceOperationWaitGlobal(config, op, "Creating Global Address")
-    if err != nil {
-        return err
-    }
-
-    return resourceComputeGlobalAddressRead(d, meta)
-}
-
-func resourceComputeGlobalAddressRead(d *schema.ResourceData, meta interface{}) error {
-    config := meta.(*Config)
-
-    addr, err := config.clientCompute.GlobalAddresses.Get(
-        config.Project, d.Id()).Do()
-    if err != nil {
-        if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
-            // The resource doesn't exist anymore
-            d.SetId("")
-
-            return nil
-        }
-
-        return fmt.Errorf("Error reading address: %s", err)
-    }
-
-    d.Set("address", addr.Address)
-    d.Set("self_link", addr.SelfLink)
-
-    return nil
-}
-
-func resourceComputeGlobalAddressDelete(d *schema.ResourceData, meta interface{}) error {
-    config := meta.(*Config)
-
-    // Delete the address
-    log.Printf("[DEBUG] address delete request")
-    op, err := config.clientCompute.GlobalAddresses.Delete(
-        config.Project, d.Id()).Do()
-    if err != nil {
-        return fmt.Errorf("Error deleting address: %s", err)
-    }
-
-    err = resourceOperationWaitGlobal(config, op, "Deleting Global Address")
-    if err != nil {
-        return err
-    }
-
-    d.SetId("")
-    return nil
-}
diff --git a/builtin/providers/google/resource_compute_global_address_test.go b/builtin/providers/google/resource_compute_global_address_test.go
deleted file mode 100644
index 2ef7b97ea..000000000
--- a/builtin/providers/google/resource_compute_global_address_test.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package google
-
-import (
-    "fmt"
-    "testing"
-
-    "github.com/hashicorp/terraform/helper/resource"
-    "github.com/hashicorp/terraform/terraform"
-    "google.golang.org/api/compute/v1"
-)
-
-func TestAccComputeGlobalAddress_basic(t *testing.T) {
-    var addr compute.Address
-
-    resource.Test(t, resource.TestCase{
-        PreCheck:     func() { testAccPreCheck(t) },
-        Providers:    testAccProviders,
-        CheckDestroy: testAccCheckComputeGlobalAddressDestroy,
-        Steps: []resource.TestStep{
-            resource.TestStep{
-                Config: testAccComputeGlobalAddress_basic,
-                Check: resource.ComposeTestCheckFunc(
-                    testAccCheckComputeGlobalAddressExists(
-                        "google_compute_global_address.foobar", &addr),
-                ),
-            },
-        },
-    })
-}
-
-func testAccCheckComputeGlobalAddressDestroy(s *terraform.State) error {
-    config := testAccProvider.Meta().(*Config)
-
-    for _, rs := range s.RootModule().Resources {
-        if rs.Type != "google_compute_global_address" {
-            continue
-        }
-
-        _, err := config.clientCompute.GlobalAddresses.Get(
-            config.Project, rs.Primary.ID).Do()
-        if err == nil {
-            return fmt.Errorf("Address still exists")
-        }
-    }
-
-    return nil
-}
-
-func testAccCheckComputeGlobalAddressExists(n string, addr *compute.Address) resource.TestCheckFunc {
-    return func(s *terraform.State) error {
-        rs, ok := s.RootModule().Resources[n]
-        if !ok {
-            return fmt.Errorf("Not found: %s", n)
-        }
-
-        if rs.Primary.ID == "" {
-            return fmt.Errorf("No ID is set")
-        }
-
-        config := testAccProvider.Meta().(*Config)
-
-        found, err := config.clientCompute.GlobalAddresses.Get(
-            config.Project, rs.Primary.ID).Do()
-        if err != nil {
-            return err
-        }
-
-        if found.Name != rs.Primary.ID {
-            return fmt.Errorf("Addr not found")
-        }
-
-        *addr = *found
-
-        return nil
-    }
-}
-
-const testAccComputeGlobalAddress_basic = `
-resource "google_compute_global_address" "foobar" {
-    name = "terraform-test"
-}`
diff --git a/builtin/providers/google/resource_compute_instance.go b/builtin/providers/google/resource_compute_instance.go
index 52575767e..229d1b05e 100644
--- a/builtin/providers/google/resource_compute_instance.go
+++ b/builtin/providers/google/resource_compute_instance.go
@@ -515,10 +515,17 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error
 
     // Sync metadata
     md := instance.Metadata
-    if err = d.Set("metadata", MetadataFormatSchema(md)); err != nil {
+    _md := MetadataFormatSchema(md)
+    if script, scriptExists := d.GetOk("metadata_startup_script"); scriptExists {
+        d.Set("metadata_startup_script", script)
+        delete(_md, "startup-script")
+    }
+
+    if err = d.Set("metadata", _md); err != nil {
         return fmt.Errorf("Error setting metadata: %s", err)
     }
+
     d.Set("can_ip_forward", instance.CanIpForward)
 
     // Set the service accounts
@@ -635,6 +642,7 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error
     }
 
     d.Set("self_link", instance.SelfLink)
+    d.SetId(instance.Name)
 
     return nil
 }
@@ -655,6 +663,14 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err
     // If the Metadata has changed, then update that.
     if d.HasChange("metadata") {
         o, n := d.GetChange("metadata")
+        if script, scriptExists := d.GetOk("metadata_startup_script"); scriptExists {
+            if _, ok := n.(map[string]interface{})["startup-script"]; ok {
+                return fmt.Errorf("Only one of metadata.startup-script and metadata_startup_script may be defined")
+            }
+
+            n.(map[string]interface{})["startup-script"] = script
+        }
+
         updateMD := func() error {
             // Reload the instance in the case of a fingerprint mismatch
diff --git a/builtin/providers/google/resource_compute_instance_test.go b/builtin/providers/google/resource_compute_instance_test.go
index 61c4906a2..f59da73ef 100644
--- a/builtin/providers/google/resource_compute_instance_test.go
+++ b/builtin/providers/google/resource_compute_instance_test.go
@@ -32,7 +32,7 @@ func TestAccComputeInstance_basic_deprecated_network(t *testing.T) {
     })
 }
 
-func TestAccComputeInstance_basic(t *testing.T) {
+func TestAccComputeInstance_basic1(t *testing.T) {
     var instance compute.Instance
 
     resource.Test(t, resource.TestCase{
diff --git a/website/source/docs/providers/google/r/compute_global_address.html.markdown b/website/source/docs/providers/google/r/compute_global_address.html.markdown
deleted file mode 100644
index 1fdb24e6d..000000000
--- a/website/source/docs/providers/google/r/compute_global_address.html.markdown
+++ /dev/null
@@ -1,37 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_global_address"
-sidebar_current: "docs-google-resource-global-address"
-description: |-
-  Creates a static global IP address resource for a Google Compute Engine project.
----
-
-# google\_compute\_global\_address
-
-Creates a static global IP address resource for a Google Compute Engine project. For more information see
-[the official documentation](https://cloud.google.com/compute/docs/instances-and-network) and
-[API](https://cloud.google.com/compute/docs/reference/latest/globalAddresses).
-
-
-## Example Usage
-
-```
-resource "google_compute_global_address" "default" {
-    name = "test-address"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A unique name for the resource, required by GCE.
-    Changing this forces a new resource to be created.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `name` - The name of the resource.
-* `address` - The IP address that was allocated.
-* `self_link` - The URI of the created resource.
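The patch above splits startup-script handling out of the `metadata` map during `Read` and merges it back during `Update`. A minimal sketch of the configuration shape this targets, where the machine type, zone, and script body are placeholder values and the disk/network blocks a real config requires are elided for brevity:

```
resource "google_compute_instance" "example" {
    name         = "example"
    machine_type = "n1-standard-1"   # placeholder
    zone         = "us-central1-a"   # placeholder

    # Managed through its own attribute, so Read can reconcile it
    # separately from the rest of the metadata map.
    metadata_startup_script = "echo booted > /tmp/booted"

    # Other metadata keys continue to live here; defining startup-script
    # in this map as well now fails during Update.
    metadata {
        role = "web"
    }
}
```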
From 7af484c8f6e9aca7792877a55655c68351fb5910 Mon Sep 17 00:00:00 2001
From: stack72
Date: Wed, 14 Oct 2015 19:16:58 +0100
Subject: [PATCH 210/220] Changing the DynamoDb Create to do a Read at the end

---
 builtin/providers/aws/resource_aws_dynamodb_table.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/builtin/providers/aws/resource_aws_dynamodb_table.go b/builtin/providers/aws/resource_aws_dynamodb_table.go
index b322ad897..c88f50d8a 100644
--- a/builtin/providers/aws/resource_aws_dynamodb_table.go
+++ b/builtin/providers/aws/resource_aws_dynamodb_table.go
@@ -291,7 +291,7 @@ func resourceAwsDynamoDbTableCreate(d *schema.ResourceData, meta interface{}) er
             return err
         }
 
-        return nil
+        return resourceAwsDynamoDbTableRead(d, meta)
     }
 }

From 4fb7ae6600ceef8ca5cdb554f9ae7057d412b92f Mon Sep 17 00:00:00 2001
From: Clint Shryock
Date: Wed, 14 Oct 2015 13:55:19 -0500
Subject: [PATCH 211/220] rename test so it can be run in isolation

---
 builtin/providers/aws/resource_aws_s3_bucket_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/builtin/providers/aws/resource_aws_s3_bucket_test.go b/builtin/providers/aws/resource_aws_s3_bucket_test.go
index e494816b3..1ce05583c 100644
--- a/builtin/providers/aws/resource_aws_s3_bucket_test.go
+++ b/builtin/providers/aws/resource_aws_s3_bucket_test.go
@@ -64,7 +64,7 @@ func TestAccAWSS3Bucket_Policy(t *testing.T) {
     })
 }
 
-func TestAccAWSS3Bucket_Website(t *testing.T) {
+func TestAccAWSS3Bucket_Website_Simple(t *testing.T) {
     resource.Test(t, resource.TestCase{
         PreCheck:     func() { testAccPreCheck(t) },
         Providers:    testAccProviders,

From f9c577aa2ad1646e1a529d3cb23355e4ce8c2c0f Mon Sep 17 00:00:00 2001
From: Clint Shryock
Date: Wed, 14 Oct 2015 13:55:37 -0500
Subject: [PATCH 212/220] update requirement for peer test

---
 .../providers/aws/resource_aws_vpc_peering_connection_test.go | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/builtin/providers/aws/resource_aws_vpc_peering_connection_test.go b/builtin/providers/aws/resource_aws_vpc_peering_connection_test.go
index 8f7360250..ca92ce66a 100644
--- a/builtin/providers/aws/resource_aws_vpc_peering_connection_test.go
+++ b/builtin/providers/aws/resource_aws_vpc_peering_connection_test.go
@@ -36,6 +36,7 @@ func TestAccAWSVPCPeeringConnection_basic(t *testing.T) {
 
 func TestAccAWSVPCPeeringConnection_tags(t *testing.T) {
     var connection ec2.VpcPeeringConnection
+    peerId := os.Getenv("TF_PEER_ID")
 
     resource.Test(t, resource.TestCase{
         PreCheck:     func() { testAccPreCheck(t) },
@@ -43,7 +44,7 @@ func TestAccAWSVPCPeeringConnection_tags(t *testing.T) {
         CheckDestroy: testAccCheckVpcDestroy,
         Steps: []resource.TestStep{
             resource.TestStep{
-                Config: testAccVpcPeeringConfigTags,
+                Config: fmt.Sprintf(testAccVpcPeeringConfigTags, peerId),
                 Check: resource.ComposeTestCheckFunc(
                     testAccCheckAWSVpcPeeringConnectionExists("aws_vpc_peering_connection.foo", &connection),
                     testAccCheckTags(&connection.Tags, "foo", "bar"),
@@ -133,6 +134,7 @@ resource "aws_vpc" "bar" {
 resource "aws_vpc_peering_connection" "foo" {
         vpc_id = "${aws_vpc.foo.id}"
         peer_vpc_id = "${aws_vpc.bar.id}"
+        peer_owner_id = "%s"
         tags {
             foo = "bar"
         }

From 6ab339b62dad8fd56ebc4bad2136bd062c0ab138 Mon Sep 17 00:00:00 2001
From: Clint Shryock
Date: Wed, 14 Oct 2015 14:49:33 -0500
Subject: [PATCH 213/220] unset website_endpoint, website_domain if website
 part is removed

---
 builtin/providers/aws/resource_aws_s3_bucket.go | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/builtin/providers/aws/resource_aws_s3_bucket.go b/builtin/providers/aws/resource_aws_s3_bucket.go
index a329d4ff6..b45f69cc4 100644
--- a/builtin/providers/aws/resource_aws_s3_bucket.go
+++ b/builtin/providers/aws/resource_aws_s3_bucket.go
@@ -464,6 +464,9 @@ func resourceAwsS3BucketWebsiteDelete(s3conn *s3.S3, d *schema.ResourceData) err
         return fmt.Errorf("Error deleting S3 website: %s", err)
     }
 
+    d.Set("website_endpoint", "")
+    d.Set("website_domain", "")
+
     return nil
 }

From 2a179d10657d58cc8ea63f7700f3a493a1c2e1a2 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Wed, 14 Oct 2015 13:44:28 -0500
Subject: [PATCH 214/220] helper/schema: ValidateFunc support for maps

---
 helper/schema/schema.go      | 15 +++++++++++++--
 helper/schema/schema_test.go |  2 +-
 2 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/helper/schema/schema.go b/helper/schema/schema.go
index 34145b136..f4d860995 100644
--- a/helper/schema/schema.go
+++ b/helper/schema/schema.go
@@ -540,8 +540,8 @@ func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error {
 
         if v.ValidateFunc != nil {
             switch v.Type {
-            case TypeList, TypeSet, TypeMap:
-                return fmt.Errorf("ValidateFunc is only supported on primitives.")
+            case TypeList, TypeSet:
+                return fmt.Errorf("ValidateFunc is not yet supported on lists or sets.")
             }
         }
     }
@@ -1118,6 +1118,17 @@ func (m schemaMap) validateMap(
         }
     }
 
+    if schema.ValidateFunc != nil {
+        validatableMap := make(map[string]interface{})
+        for _, raw := range raws {
+            for k, v := range raw.(map[string]interface{}) {
+                validatableMap[k] = v
+            }
+        }
+
+        return schema.ValidateFunc(validatableMap, k)
+    }
+
     return nil, nil
 }
 
diff --git a/helper/schema/schema_test.go b/helper/schema/schema_test.go
index faf703b0f..09eeef119 100644
--- a/helper/schema/schema_test.go
+++ b/helper/schema/schema_test.go
@@ -2903,7 +2903,7 @@ func TestSchemaMap_InternalValidate(t *testing.T) {
         {
             map[string]*Schema{
                 "foo": &Schema{
-                    Type:     TypeMap,
+                    Type:     TypeSet,
                     Required: true,
                     ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
                         return

From ab0534a356208ade7b5cba5096c215e0ac84f4c4 Mon Sep 17 00:00:00 2001
From: Clint
Date: Wed, 14 Oct 2015 16:27:05 -0500
Subject: [PATCH 215/220] Update CHANGELOG.md

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6dfefc419..61f9785a0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -42,6 +42,7 @@ IMPROVEMENTS:
   * provider/aws: Add `configuration_endpoint` to `aws_elasticache_cluster` [GH-3250]
   * provider/aws: Add validation for `app_cookie_stickiness_policy.name` [GH-3277]
   * provider/aws: Add validation for `db_parameter_group.name` [GH-3279]
+  * provider/aws: Set DynamoDB Table ARN after creation [GH-3500]
   * provider/aws: `aws_s3_bucket_object` allows interpolated content to be set with new `content` attribute. [GH-3200]
   * provider/aws: Allow tags for `aws_kinesis_stream` resource. [GH-3397]
   * provider/aws: Configurable capacity waiting duration for ASGs [GH-3191]

From a1939e70f7fc806532e977adfdf2356c525c8c2a Mon Sep 17 00:00:00 2001
From: Rob Zienert
Date: Sun, 9 Aug 2015 03:02:28 -0500
Subject: [PATCH 216/220] Adding ignore_changes lifecycle meta property

---
 .gitignore                                    |  1 +
 config/config.go                              |  5 +-
 config/loader_test.go                         | 57 +++++++++++++++++++
 config/test-fixtures/ignore-changes.tf        | 17 ++++++
 terraform/context_plan_test.go                | 46 +++++++++++++++
 terraform/eval_ignore_changes.go              | 32 +++++++++++
 terraform/terraform_test.go                   | 13 +++++
 .../test-fixtures/plan-ignore-changes/main.tf |  9 +++
 terraform/transform_resource.go               |  4 ++
 .../docs/configuration/resources.html.md      | 11 ++++
 10 files changed, 193 insertions(+), 2 deletions(-)
 create mode 100644 config/test-fixtures/ignore-changes.tf
 create mode 100644 terraform/eval_ignore_changes.go
 create mode 100644 terraform/test-fixtures/plan-ignore-changes/main.tf

diff --git a/.gitignore b/.gitignore
index 314611940..66ea31701 100644
--- a/.gitignore
+++ b/.gitignore
@@ -18,3 +18,4 @@ website/node_modules
 *.bak
 *~
 .*.swp
+.idea
diff --git a/config/config.go b/config/config.go
index c088414da..d31777f6e 100644
--- a/config/config.go
+++ b/config/config.go
@@ -84,8 +84,9 @@ type Resource struct {
 // ResourceLifecycle is used to store the lifecycle tuning parameters
 // to allow customized behavior
 type ResourceLifecycle struct {
-    CreateBeforeDestroy bool `mapstructure:"create_before_destroy"`
-    PreventDestroy      bool `mapstructure:"prevent_destroy"`
+    CreateBeforeDestroy bool     `mapstructure:"create_before_destroy"`
+    PreventDestroy      bool     `mapstructure:"prevent_destroy"`
+    IgnoreChanges       []string `mapstructure:"ignore_changes"`
 }
 
 // Provisioner is a configured provisioner step on a resource.
diff --git a/config/loader_test.go b/config/loader_test.go
index d239bd0b9..eaf4f10aa 100644
--- a/config/loader_test.go
+++ b/config/loader_test.go
@@ -440,6 +440,54 @@ func TestLoadFile_createBeforeDestroy(t *testing.T) {
     }
 }
 
+func TestLoadFile_ignoreChanges(t *testing.T) {
+    c, err := LoadFile(filepath.Join(fixtureDir, "ignore-changes.tf"))
+    if err != nil {
+        t.Fatalf("err: %s", err)
+    }
+
+    if c == nil {
+        t.Fatal("config should not be nil")
+    }
+
+    actual := resourcesStr(c.Resources)
+    t.Logf("resources:\n%s", actual)
+    if actual != strings.TrimSpace(ignoreChangesResourcesStr) {
+        t.Fatalf("bad:\n%s", actual)
+    }
+
+    // Check for the flag value
+    r := c.Resources[0]
+    if r.Name != "web" || r.Type != "aws_instance" {
+        t.Fatalf("Bad: %#v", r)
+    }
+
+    // Should populate ignore changes
+    if len(r.Lifecycle.IgnoreChanges) == 0 {
+        t.Fatalf("Bad: %#v", r)
+    }
+
+    r = c.Resources[1]
+    if r.Name != "bar" || r.Type != "aws_instance" {
+        t.Fatalf("Bad: %#v", r)
+    }
+
+    // Should not populate ignore changes
+    if len(r.Lifecycle.IgnoreChanges) > 0 {
+        t.Fatalf("Bad: %#v", r)
+    }
+
+    r = c.Resources[2]
+    if r.Name != "baz" || r.Type != "aws_instance" {
+        t.Fatalf("Bad: %#v", r)
+    }
+
+    // Should not populate ignore changes
+    if len(r.Lifecycle.IgnoreChanges) > 0 {
+        t.Fatalf("Bad: %#v", r)
+    }
+}
+
 func TestLoad_preventDestroyString(t *testing.T) {
     c, err := LoadFile(filepath.Join(fixtureDir, "prevent-destroy-string.tf"))
     if err != nil {
@@ -676,3 +724,12 @@ aws_instance[bar] (x1)
 aws_instance[web] (x1)
   ami
 `
+
+const ignoreChangesResourcesStr = `
+aws_instance[bar] (x1)
+  ami
+aws_instance[baz] (x1)
+  ami
+aws_instance[web] (x1)
+  ami
+`
diff --git a/config/test-fixtures/ignore-changes.tf b/config/test-fixtures/ignore-changes.tf
new file mode 100644
index 000000000..765a05798
--- /dev/null
+++ b/config/test-fixtures/ignore-changes.tf
@@ -0,0 +1,17 @@
+resource "aws_instance" "web" {
+    ami = "foo"
+    lifecycle {
+        ignore_changes = ["ami"]
+    }
+}
+
+resource "aws_instance" "bar" {
+    ami = "foo"
+    lifecycle {
+        ignore_changes = []
+    }
+}
+
+resource "aws_instance" "baz" {
+    ami = "foo"
+}
diff --git a/terraform/context_plan_test.go b/terraform/context_plan_test.go
index 50f2bb471..db6f24577 100644
--- a/terraform/context_plan_test.go
+++ b/terraform/context_plan_test.go
@@ -1672,3 +1672,49 @@ func TestContext2Plan_varListErr(t *testing.T) {
         t.Fatal("should error")
     }
 }
+
+func TestContext2Plan_ignoreChanges(t *testing.T) {
+    m := testModule(t, "plan-ignore-changes")
+    p := testProvider("aws")
+    p.DiffFn = testDiffFn
+    s := &State{
+        Modules: []*ModuleState{
+            &ModuleState{
+                Path: rootModulePath,
+                Resources: map[string]*ResourceState{
+                    "aws_instance.foo": &ResourceState{
+                        Primary: &InstanceState{
+                            ID:         "bar",
+                            Attributes: map[string]string{"ami": "ami-abcd1234"},
+                        },
+                    },
+                },
+            },
+        },
+    }
+    ctx := testContext2(t, &ContextOpts{
+        Module: m,
+        Providers: map[string]ResourceProviderFactory{
+            "aws": testProviderFuncFixed(p),
+        },
+        Variables: map[string]string{
+            "foo": "ami-1234abcd",
+        },
+        State: s,
+    })
+
+    plan, err := ctx.Plan()
+    if err != nil {
+        t.Fatalf("err: %s", err)
+    }
+
+    if len(plan.Diff.RootModule().Resources) < 1 {
+        t.Fatalf("bad: %#v", plan.Diff.RootModule().Resources)
+    }
+
+    actual := strings.TrimSpace(plan.String())
+    expected := strings.TrimSpace(testTerraformPlanIgnoreChangesStr)
+    if actual != expected {
+        t.Fatalf("bad:\n%s\n\nexpected\n\n%s", actual, expected)
+    }
+}
diff --git a/terraform/eval_ignore_changes.go b/terraform/eval_ignore_changes.go
new file mode 100644
index 000000000..1a44089a9
--- /dev/null
+++ b/terraform/eval_ignore_changes.go
@@ -0,0 +1,32 @@
+package terraform
+import (
+    "github.com/hashicorp/terraform/config"
+    "strings"
+)
+
+// EvalIgnoreChanges is an EvalNode implementation that removes diff
+// attributes if their name matches names provided by the resource's
+// IgnoreChanges lifecycle.
+type EvalIgnoreChanges struct {
+    Resource *config.Resource
+    Diff     **InstanceDiff
+}
+
+func (n *EvalIgnoreChanges) Eval(ctx EvalContext) (interface{}, error) {
+    if n.Diff == nil || *n.Diff == nil || n.Resource == nil || n.Resource.Id() == "" {
+        return nil, nil
+    }
+
+    diff := *n.Diff
+    ignoreChanges := n.Resource.Lifecycle.IgnoreChanges
+
+    for _, ignoredName := range ignoreChanges {
+        for name := range diff.Attributes {
+            if strings.HasPrefix(name, ignoredName) {
+                delete(diff.Attributes, name)
+            }
+        }
+    }
+
+    return nil, nil
+}
diff --git a/terraform/terraform_test.go b/terraform/terraform_test.go
index c84e9803c..02d4de2a2 100644
--- a/terraform/terraform_test.go
+++ b/terraform/terraform_test.go
@@ -1286,3 +1286,16 @@ STATE:
 
 `
+
+const testTerraformPlanIgnoreChangesStr = `
+DIFF:
+
+UPDATE: aws_instance.foo
+  type: "" => "aws_instance"
+
+STATE:
+
+aws_instance.foo:
+  ID = bar
+  ami = ami-abcd1234
+`
diff --git a/terraform/test-fixtures/plan-ignore-changes/main.tf b/terraform/test-fixtures/plan-ignore-changes/main.tf
new file mode 100644
index 000000000..056256a1d
--- /dev/null
+++ b/terraform/test-fixtures/plan-ignore-changes/main.tf
@@ -0,0 +1,9 @@
+variable "foo" {}
+
+resource "aws_instance" "foo" {
+  ami = "${var.foo}"
+
+  lifecycle {
+    ignore_changes = ["ami"]
+  }
+}
diff --git a/terraform/transform_resource.go b/terraform/transform_resource.go
index a52b3ba72..81ff158d9 100644
--- a/terraform/transform_resource.go
+++ b/terraform/transform_resource.go
@@ -318,6 +318,10 @@ func (n *graphNodeExpandedResource) EvalTree() EvalNode {
                     Resource: n.Resource,
                     Diff:     &diff,
                 },
+                &EvalIgnoreChanges{
+                    Resource: n.Resource,
+                    Diff:     &diff,
+                },
                 &EvalWriteState{
                     Name:         n.stateId(),
                     ResourceType: n.Resource.Type,
diff --git a/website/source/docs/configuration/resources.html.md b/website/source/docs/configuration/resources.html.md
index f099c5f25..d5e087fec 100644
--- a/website/source/docs/configuration/resources.html.md
+++ b/website/source/docs/configuration/resources.html.md
@@ -68,11 +68,20 @@ The `lifecycle` block allows the following keys to be set:
     destruction of a given resource. When this is set to `true`, any plan
     that includes a destroy of this resource will return an error message.
 
+  * `ignore_changes` (list of strings) - Customizes how diffs are evaluated for
+    resources, allowing changes to individual attributes to be ignored. As an
+    example, this can be used to ignore dynamic changes made to a resource by
+    an external system. Other meta-parameters cannot be ignored.
+
 ~> **NOTE on create\_before\_destroy and dependencies:** Resources that utilize
 the `create_before_destroy` key can only depend on other resources that also
 include `create_before_destroy`. Referencing a resource that does not include
 `create_before_destroy` will result in a dependency graph cycle.
 
+~> **NOTE on ignore\_changes:** Ignored attribute names are matched by prefix on the
+attribute name, not the state ID. For example, if an `aws_route_table` has two routes
+defined and the `ignore_changes` list contains "route", both routes will be ignored.
+
 -------------
 
 Within a resource, you can optionally have a **connection block**.
@@ -191,6 +200,8 @@ where `LIFECYCLE` is:
 ```
 lifecycle {
     [create_before_destroy = true|false]
+    [prevent_destroy = true|false]
+    [ignore_changes = [ATTRIBUTE NAME, ...]]
 }
 ```

From 4f4c572aa4394d0bd5ff509e09c4cdff5f0d7626 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Wed, 14 Oct 2015 18:23:14 -0500
Subject: [PATCH 217/220] Update CHANGELOG.md

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 61f9785a0..0a4437768 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -20,6 +20,7 @@ FEATURES:
   * **New resource: `aws_autoscaling_lifecycle_hook`** [GH-3351]
   * **New resource: `aws_placement_group`** [GH-3457]
   * **New resource: `aws_glacier_vault`** [GH-3491]
+  * **New lifecycle flag: `ignore_changes`** [GH-2525]
 
 IMPROVEMENTS:

From 4f400a1944186ab2c7f057343d7a45b94b13dd71 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Wed, 14 Oct 2015 13:17:44 -0500
Subject: [PATCH 218/220] provider/google: one more fix to GCE metadata

In #3501 @lwander got us almost all the way there, but we still had
tests failing. This seemed to be because GCE sets
`metadata.startup-script` to a blank string on instance creation, and
if a user specifies any `metadata` in their config this is seen as the
desired full contents of metadata, so we get a diff trying to remove
`startup-script`.

Here, to address this, we just proactively remove the "startup-script"
key from `Read`, and then we enforce that "metadata_startup_script"
is the only way to configure startup scripts on instances.
---
 .../google/resource_compute_instance.go       | 30 +++++++++++--------
 1 file changed, 17 insertions(+), 13 deletions(-)

diff --git a/builtin/providers/google/resource_compute_instance.go b/builtin/providers/google/resource_compute_instance.go
index 229d1b05e..68b8aed35 100644
--- a/builtin/providers/google/resource_compute_instance.go
+++ b/builtin/providers/google/resource_compute_instance.go
@@ -197,9 +197,10 @@ func resourceComputeInstance() *schema.Resource {
             },
 
             "metadata": &schema.Schema{
-                Type:     schema.TypeMap,
-                Optional: true,
-                Elem:     schema.TypeString,
+                Type:         schema.TypeMap,
+                Optional:     true,
+                Elem:         schema.TypeString,
+                ValidateFunc: validateInstanceMetadata,
             },
 
             "service_account": &schema.Schema{
@@ -516,16 +517,16 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error
     md := instance.Metadata
 
     _md := MetadataFormatSchema(md)
+    delete(_md, "startup-script")
+
     if script, scriptExists := d.GetOk("metadata_startup_script"); scriptExists {
         d.Set("metadata_startup_script", script)
-        delete(_md, "startup-script")
     }
 
     if err = d.Set("metadata", _md); err != nil {
         return fmt.Errorf("Error setting metadata: %s", err)
     }
-
     d.Set("can_ip_forward", instance.CanIpForward)
 
     // Set the service accounts
@@ -671,7 +672,6 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err
             n.(map[string]interface{})["startup-script"] = script
         }
-
         updateMD := func() error {
             // Reload the instance in the case of a fingerprint mismatch
             instance, err = getInstance(config, d)
@@ -810,13 +810,8 @@ func resourceComputeInstanceDelete(d *schema.ResourceData, meta interface{}) err
 func resourceInstanceMetadata(d *schema.ResourceData) (*compute.Metadata, error) {
     m := &compute.Metadata{}
     mdMap := d.Get("metadata").(map[string]interface{})
-    _, mapScriptExists := mdMap["startup-script"]
-    dScript, dScriptExists := d.GetOk("metadata_startup_script")
-    if mapScriptExists && dScriptExists {
-        return nil, fmt.Errorf("Not allowed to have both metadata_startup_script and metadata.startup-script")
-    }
-    if dScriptExists {
-        mdMap["startup-script"] = dScript
+    if v, ok := d.GetOk("metadata_startup_script"); ok && v.(string) != "" {
+        mdMap["startup-script"] = v
     }
     if len(mdMap) > 0 {
         m.Items = make([]*compute.MetadataItems, 0, len(mdMap))
@@ -852,3 +847,12 @@ func resourceInstanceTags(d *schema.ResourceData) *compute.Tags {
 
     return tags
 }
+
+func validateInstanceMetadata(v interface{}, k string) (ws []string, es []error) {
+    mdMap := v.(map[string]interface{})
+    if _, ok := mdMap["startup-script"]; ok {
+        es = append(es, fmt.Errorf(
+            "Use metadata_startup_script instead of a startup-script key in %q.", k))
+    }
+    return
+}

From beff2ff4600d205cf2b549a4537f5dbec9ff62ea Mon Sep 17 00:00:00 2001
From: Martin Atkins
Date: Wed, 14 Oct 2015 19:35:20 -0700
Subject: [PATCH 219/220] Update CHANGELOG.md

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0a4437768..a29895aca 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -47,6 +47,7 @@ IMPROVEMENTS:
   * provider/aws: `aws_s3_bucket_object` allows interpolated content to be set with new `content` attribute. [GH-3200]
   * provider/aws: Allow tags for `aws_kinesis_stream` resource. [GH-3397]
   * provider/aws: Configurable capacity waiting duration for ASGs [GH-3191]
+  * provider/aws: Allow non-persistent Spot Requests [GH-3311]
   * provider/cloudstack: Add `project` parameter to `cloudstack_vpc`, `cloudstack_network`, `cloudstack_ipaddress` and `cloudstack_disk` [GH-3035]
   * provider/openstack: add functionality to attach FloatingIP to Port [GH-1788]
   * provider/google: Can now do multi-region deployments without using multiple providers [GH-3258]

From 2e3b3cfad210b07cf77ae38d9495d2bb9bf8c1c7 Mon Sep 17 00:00:00 2001
From: Martin Atkins
Date: Wed, 14 Oct 2015 19:37:33 -0700
Subject: [PATCH 220/220] Update CHANGELOG.md

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a29895aca..9a7d91318 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -48,6 +48,7 @@ IMPROVEMENTS:
   * provider/aws: Allow tags for `aws_kinesis_stream` resource. [GH-3397]
   * provider/aws: Configurable capacity waiting duration for ASGs [GH-3191]
   * provider/aws: Allow non-persistent Spot Requests [GH-3311]
+  * provider/aws: Support tags for AWS DB subnet group [GH-3138]
   * provider/cloudstack: Add `project` parameter to `cloudstack_vpc`, `cloudstack_network`, `cloudstack_ipaddress` and `cloudstack_disk` [GH-3035]
   * provider/openstack: add functionality to attach FloatingIP to Port [GH-1788]
   * provider/google: Can now do multi-region deployments without using multiple providers [GH-3258]
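With `ValidateFunc` now supported on map attributes (PATCH 214) and wired into the instance schema via `validateInstanceMetadata` (PATCH 218), an inline startup script is rejected at validation time rather than at apply time. A hedged sketch of both sides of that rule; names and values are placeholders, and the disk/network blocks a real config requires are omitted:

```
# Accepted: the startup script lives in its dedicated attribute.
resource "google_compute_instance" "accepted" {
    name         = "example-ok"      # placeholder
    machine_type = "n1-standard-1"   # placeholder
    zone         = "us-central1-a"   # placeholder

    metadata_startup_script = "echo booted"

    metadata {
        role = "web"
    }
}

# Rejected: validateInstanceMetadata errors on a startup-script key
# inside the metadata map.
resource "google_compute_instance" "rejected" {
    name         = "example-bad"     # placeholder
    machine_type = "n1-standard-1"   # placeholder
    zone         = "us-central1-a"   # placeholder

    metadata {
        "startup-script" = "echo booted"
    }
}
```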