From f9dd42ddce62453ab5700b92caa9b5ac9d76d000 Mon Sep 17 00:00:00 2001
From: Joe Topjian
Date: Sat, 14 Nov 2015 19:47:37 +0000
Subject: [PATCH] provider/openstack: Add State Change support to LBaaS
 Resources

This commit adds State Change support to the LBaaS resources, which
should help with clean terminations. It also adds an acceptance test
that builds out a 2-node load-balanced service.
---
 .../resource_openstack_lb_monitor_v1.go       |  87 ++++++++++++-
 .../resource_openstack_lb_pool_v1.go          |  81 +++++++++++-
 .../resource_openstack_lb_pool_v1_test.go     | 120 ++++++++++++++++++
 .../openstack/resource_openstack_lb_vip_v1.go |  80 +++++++++++-
 4 files changed, 365 insertions(+), 3 deletions(-)

diff --git a/builtin/providers/openstack/resource_openstack_lb_monitor_v1.go b/builtin/providers/openstack/resource_openstack_lb_monitor_v1.go
index 8774dadca..bca89a2d4 100644
--- a/builtin/providers/openstack/resource_openstack_lb_monitor_v1.go
+++ b/builtin/providers/openstack/resource_openstack_lb_monitor_v1.go
@@ -4,8 +4,12 @@ import (
 	"fmt"
 	"log"
 	"strconv"
+	"time"
 
+	"github.com/hashicorp/terraform/helper/resource"
 	"github.com/hashicorp/terraform/helper/schema"
+
+	"github.com/rackspace/gophercloud"
 	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors"
 )
 
@@ -108,6 +112,22 @@ func resourceLBMonitorV1Create(d *schema.ResourceData, meta interface{}) error {
 	}
 	log.Printf("[INFO] LB Monitor ID: %s", m.ID)
 
+	log.Printf("[DEBUG] Waiting for OpenStack LB Monitor (%s) to become available.", m.ID)
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"PENDING"},
+		Target:     "ACTIVE",
+		Refresh:    waitForLBMonitorActive(networkingClient, m.ID),
+		Timeout:    2 * time.Minute,
+		Delay:      5 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return err
+	}
+
 	d.SetId(m.ID)
 
 	return resourceLBMonitorV1Read(d, meta)
@@ -184,7 +204,16 @@ func resourceLBMonitorV1Delete(d *schema.ResourceData, meta interface{}) error {
 		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
 	}
 
-	err = monitors.Delete(networkingClient, d.Id()).ExtractErr()
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"ACTIVE", "PENDING"},
+		Target:     "DELETED",
+		Refresh:    waitForLBMonitorDelete(networkingClient, d.Id()),
+		Timeout:    2 * time.Minute,
+		Delay:      5 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
 	if err != nil {
 		return fmt.Errorf("Error deleting OpenStack LB Monitor: %s", err)
 	}
@@ -192,3 +221,59 @@ func resourceLBMonitorV1Delete(d *schema.ResourceData, meta interface{}) error {
 	d.SetId("")
 	return nil
 }
+
+func waitForLBMonitorActive(networkingClient *gophercloud.ServiceClient, monitorId string) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		m, err := monitors.Get(networkingClient, monitorId).Extract()
+		if err != nil {
+			return nil, "", err
+		}
+
+		// The monitor resource has no Status attribute, so a successful Get is the best we can do
+		log.Printf("[DEBUG] OpenStack LB Monitor: %+v", m)
+		return m, "ACTIVE", nil
+	}
+}
+
+func waitForLBMonitorDelete(networkingClient *gophercloud.ServiceClient, monitorId string) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		log.Printf("[DEBUG] Attempting to delete OpenStack LB Monitor %s", monitorId)
+
+		m, err := monitors.Get(networkingClient, monitorId).Extract()
+		if err != nil {
+			errCode, ok := err.(*gophercloud.UnexpectedResponseCodeError)
+			if !ok {
+				return m, "ACTIVE", err
+			}
+			if errCode.Actual == 404 {
+				log.Printf("[DEBUG] Successfully deleted OpenStack LB Monitor %s", monitorId)
+				return m, "DELETED", nil
+			}
+			if errCode.Actual == 409 {
+				log.Printf("[DEBUG] OpenStack LB Monitor (%s) is waiting for Pool to delete.", monitorId)
+				return m, "PENDING", nil
+			}
+		}
+
+		log.Printf("[DEBUG] OpenStack LB Monitor: %+v", m)
+		err = monitors.Delete(networkingClient, monitorId).ExtractErr()
+		if err != nil {
+			errCode, ok := err.(*gophercloud.UnexpectedResponseCodeError)
+			if !ok {
+				return m, "ACTIVE", err
+			}
+			if errCode.Actual == 404 {
+				log.Printf("[DEBUG] Successfully deleted OpenStack LB Monitor %s", monitorId)
+				return m, "DELETED", nil
+			}
+			if errCode.Actual == 409 {
+				log.Printf("[DEBUG] OpenStack LB Monitor (%s) is waiting for Pool to delete.", monitorId)
+				return m, "PENDING", nil
+			}
+		}
+
+		log.Printf("[DEBUG] OpenStack LB Monitor %s still active.", monitorId)
+		return m, "ACTIVE", nil
+	}
+
+}
diff --git a/builtin/providers/openstack/resource_openstack_lb_pool_v1.go b/builtin/providers/openstack/resource_openstack_lb_pool_v1.go
index 64e0436db..21177fbf2 100644
--- a/builtin/providers/openstack/resource_openstack_lb_pool_v1.go
+++ b/builtin/providers/openstack/resource_openstack_lb_pool_v1.go
@@ -4,9 +4,13 @@ import (
 	"bytes"
 	"fmt"
 	"log"
+	"time"
 
 	"github.com/hashicorp/terraform/helper/hashcode"
+	"github.com/hashicorp/terraform/helper/resource"
 	"github.com/hashicorp/terraform/helper/schema"
+
+	"github.com/rackspace/gophercloud"
 	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members"
 	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools"
 	"github.com/rackspace/gophercloud/pagination"
@@ -123,6 +127,21 @@ func resourceLBPoolV1Create(d *schema.ResourceData, meta interface{}) error {
 	}
 	log.Printf("[INFO] LB Pool ID: %s", p.ID)
 
+	log.Printf("[DEBUG] Waiting for OpenStack LB pool (%s) to become available.", p.ID)
+
+	stateConf := &resource.StateChangeConf{
+		Target:     "ACTIVE",
+		Refresh:    waitForLBPoolActive(networkingClient, p.ID),
+		Timeout:    2 * time.Minute,
+		Delay:      5 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return err
+	}
+
 	d.SetId(p.ID)
 
 	if mIDs := resourcePoolMonitorIDsV1(d); mIDs != nil {
@@ -273,7 +292,16 @@ func resourceLBPoolV1Delete(d *schema.ResourceData, meta interface{}) error {
 		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
 	}
 
-	err = pools.Delete(networkingClient, d.Id()).ExtractErr()
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"ACTIVE"},
+		Target:     "DELETED",
+		Refresh:    waitForLBPoolDelete(networkingClient, d.Id()),
+		Timeout:    2 * time.Minute,
+		Delay:      5 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
 	if err != nil {
 		return fmt.Errorf("Error deleting OpenStack LB Pool: %s", err)
 	}
@@ -326,3 +354,54 @@ func resourceLBMemberV1Hash(v interface{}) int {
 
 	return hashcode.String(buf.String())
 }
+
+func waitForLBPoolActive(networkingClient *gophercloud.ServiceClient, poolId string) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		p, err := pools.Get(networkingClient, poolId).Extract()
+		if err != nil {
+			return nil, "", err
+		}
+
+		log.Printf("[DEBUG] OpenStack LB Pool: %+v", p)
+		if p.Status == "ACTIVE" {
+			return p, "ACTIVE", nil
+		}
+
+		return p, p.Status, nil
+	}
+}
+
+func waitForLBPoolDelete(networkingClient *gophercloud.ServiceClient, poolId string) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		log.Printf("[DEBUG] Attempting to delete OpenStack LB Pool %s", poolId)
+
+		p, err := pools.Get(networkingClient, poolId).Extract()
+		if err != nil {
+			errCode, ok := err.(*gophercloud.UnexpectedResponseCodeError)
+			if !ok {
+				return p, "ACTIVE", err
+			}
+			if errCode.Actual == 404 {
+				log.Printf("[DEBUG] Successfully deleted OpenStack LB Pool %s", poolId)
+				return p, "DELETED", nil
+			}
+		}
+
+		log.Printf("[DEBUG] OpenStack LB Pool: %+v", p)
+		err = pools.Delete(networkingClient, poolId).ExtractErr()
+		if err != nil {
+			errCode, ok := err.(*gophercloud.UnexpectedResponseCodeError)
+			if !ok {
+				return p, "ACTIVE", err
+			}
+			if errCode.Actual == 404 {
+				log.Printf("[DEBUG] Successfully deleted OpenStack LB Pool %s", poolId)
+				return p, "DELETED", nil
+			}
+		}
+
+		log.Printf("[DEBUG] OpenStack LB Pool %s still active.", poolId)
+		return p, "ACTIVE", nil
+	}
+
+}
diff --git a/builtin/providers/openstack/resource_openstack_lb_pool_v1_test.go b/builtin/providers/openstack/resource_openstack_lb_pool_v1_test.go
index 1889c2384..104e35948 100644
--- a/builtin/providers/openstack/resource_openstack_lb_pool_v1_test.go
+++ b/builtin/providers/openstack/resource_openstack_lb_pool_v1_test.go
@@ -7,7 +7,13 @@ import (
 
 	"github.com/hashicorp/terraform/helper/resource"
 	"github.com/hashicorp/terraform/terraform"
+	"github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups"
+	"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
+	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors"
 	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools"
+	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips"
+	"github.com/rackspace/gophercloud/openstack/networking/v2/networks"
+	"github.com/rackspace/gophercloud/openstack/networking/v2/subnets"
 )
 
 func TestAccLBV1Pool_basic(t *testing.T) {
@@ -34,6 +40,37 @@ func TestAccLBV1Pool_basic(t *testing.T) {
 	})
 }
 
+func TestAccLBV1Pool_fullstack(t *testing.T) {
+	var instance1, instance2 servers.Server
+	var monitor monitors.Monitor
+	var network networks.Network
+	var pool pools.Pool
+	var secgroup secgroups.SecurityGroup
+	var subnet subnets.Subnet
+	var vip vips.VirtualIP
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckLBV1PoolDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccLBV1Pool_fullstack,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckNetworkingV2NetworkExists(t, "openstack_networking_network_v2.network_1", &network),
+					testAccCheckNetworkingV2SubnetExists(t, "openstack_networking_subnet_v2.subnet_1", &subnet),
+					testAccCheckComputeV2SecGroupExists(t, "openstack_compute_secgroup_v2.secgroup_1", &secgroup),
+					testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.instance_1", &instance1),
+					testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.instance_2", &instance2),
+					testAccCheckLBV1PoolExists(t, "openstack_lb_pool_v1.pool_1", &pool),
+					testAccCheckLBV1MonitorExists(t, "openstack_lb_monitor_v1.monitor_1", &monitor),
+					testAccCheckLBV1VIPExists(t, "openstack_lb_vip_v1.vip_1", &vip),
+				),
+			},
+		},
+	})
+}
+
 func testAccCheckLBV1PoolDestroy(s *terraform.State) error {
 	config := testAccProvider.Meta().(*Config)
 	networkingClient, err := config.networkingV2Client(OS_REGION_NAME)
@@ -132,3 +169,86 @@ var testAccLBV1Pool_update = fmt.Sprintf(`
     lb_method = "ROUND_ROBIN"
   }`, OS_REGION_NAME, OS_REGION_NAME, OS_REGION_NAME)
+
+var testAccLBV1Pool_fullstack = fmt.Sprintf(`
+  resource "openstack_networking_network_v2" "network_1" {
+    name = "network_1"
+    admin_state_up = "true"
+  }
+
+  resource "openstack_networking_subnet_v2" "subnet_1" {
+    network_id = "${openstack_networking_network_v2.network_1.id}"
+    cidr = "192.168.199.0/24"
+    ip_version = 4
+  }
+
+  resource "openstack_compute_secgroup_v2" "secgroup_1" {
+    name = "secgroup_1"
+    description = "Rules for secgroup_1"
+
+    rule {
+      from_port = -1
+      to_port = -1
+      ip_protocol = "icmp"
+      cidr = "0.0.0.0/0"
+    }
+
+    rule {
+      from_port = 80
+      to_port = 80
+      ip_protocol = "tcp"
+      cidr = "0.0.0.0/0"
+    }
+  }
+
+  resource "openstack_compute_instance_v2" "instance_1" {
+    name = "instance_1"
+    security_groups = ["default", "${openstack_compute_secgroup_v2.secgroup_1.name}"]
+    network {
+      uuid = "${openstack_networking_network_v2.network_1.id}"
+    }
+  }
+
+  resource "openstack_compute_instance_v2" "instance_2" {
+    name = "instance_2"
+    security_groups = ["default", "${openstack_compute_secgroup_v2.secgroup_1.name}"]
+    network {
+      uuid = "${openstack_networking_network_v2.network_1.id}"
+    }
+  }
+
+  resource "openstack_lb_monitor_v1" "monitor_1" {
+    type = "TCP"
+    delay = 30
+    timeout = 5
+    max_retries = 3
+    admin_state_up = "true"
+  }
+
+  resource "openstack_lb_pool_v1" "pool_1" {
+    name = "pool_1"
+    protocol = "TCP"
+    subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}"
+    lb_method = "ROUND_ROBIN"
+    monitor_ids = ["${openstack_lb_monitor_v1.monitor_1.id}"]
+
+    member {
+      address = "${openstack_compute_instance_v2.instance_1.access_ip_v4}"
+      port = 80
+      admin_state_up = "true"
+    }
+
+    member {
+      address = "${openstack_compute_instance_v2.instance_2.access_ip_v4}"
+      port = 80
+      admin_state_up = "true"
+    }
+  }
+
+  resource "openstack_lb_vip_v1" "vip_1" {
+    name = "vip_1"
+    subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}"
+    protocol = "TCP"
+    port = 80
+    pool_id = "${openstack_lb_pool_v1.pool_1.id}"
+  }`)
diff --git a/builtin/providers/openstack/resource_openstack_lb_vip_v1.go b/builtin/providers/openstack/resource_openstack_lb_vip_v1.go
index dd165df77..3955282c9 100644
--- a/builtin/providers/openstack/resource_openstack_lb_vip_v1.go
+++ b/builtin/providers/openstack/resource_openstack_lb_vip_v1.go
@@ -3,7 +3,9 @@ package openstack
 import (
 	"fmt"
 	"log"
+	"time"
 
+	"github.com/hashicorp/terraform/helper/resource"
 	"github.com/hashicorp/terraform/helper/schema"
 	"github.com/rackspace/gophercloud"
 	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips"
@@ -128,6 +130,22 @@ func resourceLBVipV1Create(d *schema.ResourceData, meta interface{}) error {
 	}
 	log.Printf("[INFO] LB VIP ID: %s", p.ID)
 
+	log.Printf("[DEBUG] Waiting for OpenStack LB VIP (%s) to become available.", p.ID)
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"PENDING_CREATE"},
+		Target:     "ACTIVE",
+		Refresh:    waitForLBVIPActive(networkingClient, p.ID),
+		Timeout:    2 * time.Minute,
+		Delay:      5 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return err
+	}
+
 	floatingIP := d.Get("floating_ip").(string)
 	if floatingIP != "" {
 		lbVipV1AssignFloatingIP(floatingIP, p.PortID, networkingClient)
@@ -245,7 +263,16 @@ func resourceLBVipV1Delete(d *schema.ResourceData, meta interface{}) error {
 		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
 	}
 
-	err = vips.Delete(networkingClient, d.Id()).ExtractErr()
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"ACTIVE"},
[]string{"ACTIVE"}, + Target: "DELETED", + Refresh: waitForLBVIPDelete(networkingClient, d.Id()), + Timeout: 2 * time.Minute, + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() if err != nil { return fmt.Errorf("Error deleting OpenStack LB VIP: %s", err) } @@ -298,3 +325,54 @@ func lbVipV1AssignFloatingIP(floatingIP, portID string, networkingClient *gopher return nil } + +func waitForLBVIPActive(networkingClient *gophercloud.ServiceClient, vipId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + p, err := vips.Get(networkingClient, vipId).Extract() + if err != nil { + return nil, "", err + } + + log.Printf("[DEBUG] OpenStack LB VIP: %+v", p) + if p.Status == "ACTIVE" { + return p, "ACTIVE", nil + } + + return p, p.Status, nil + } +} + +func waitForLBVIPDelete(networkingClient *gophercloud.ServiceClient, vipId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] Attempting to delete OpenStack LB VIP %s", vipId) + + p, err := vips.Get(networkingClient, vipId).Extract() + if err != nil { + errCode, ok := err.(*gophercloud.UnexpectedResponseCodeError) + if !ok { + return p, "ACTIVE", err + } + if errCode.Actual == 404 { + log.Printf("[DEBUG] Successfully deleted OpenStack LB VIP %s", vipId) + return p, "DELETED", nil + } + } + + log.Printf("[DEBUG] OpenStack LB VIP: %+v", p) + err = vips.Delete(networkingClient, vipId).ExtractErr() + if err != nil { + errCode, ok := err.(*gophercloud.UnexpectedResponseCodeError) + if !ok { + return p, "ACTIVE", err + } + if errCode.Actual == 404 { + log.Printf("[DEBUG] Successfully deleted OpenStack LB VIP %s", vipId) + return p, "DELETED", nil + } + } + + log.Printf("[DEBUG] OpenStack LB VIP %s still active.", vipId) + return p, "ACTIVE", nil + } + +}