provider/openstack: Disassociate Monitors from Pool Before Deletion
This commit ensures that all monitors are disassociated from a load balancing pool before the pool is deleted. A test has been added to verify that a full load balancing stack can handle an update to an instance that causes some components to be rebuilt.
This commit is contained in:
parent
e7c9031dd4
commit
c62dc3f72f
|
@@ -299,6 +299,19 @@ func resourceLBPoolV1Delete(d *schema.ResourceData, meta interface{}) error {
|
|||
return fmt.Errorf("Error creating OpenStack networking client: %s", err)
|
||||
}
|
||||
|
||||
// Make sure all monitors are disassociated first
|
||||
if v, ok := d.GetOk("monitor_ids"); ok {
|
||||
if monitorIDList, ok := v.([]interface{}); ok {
|
||||
for _, monitorID := range monitorIDList {
|
||||
mID := monitorID.(string)
|
||||
log.Printf("[DEBUG] Attempting to disassociate monitor %s from pool %s", mID, d.Id())
|
||||
if res := pools.DisassociateMonitor(networkingClient, d.Id(), mID); res.Err != nil {
|
||||
return fmt.Errorf("Error disassociating monitor %s from pool %s: %s", mID, d.Id(), err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
stateConf := &resource.StateChangeConf{
|
||||
Pending: []string{"ACTIVE", "PENDING_DELETE"},
|
||||
Target: []string{"DELETED"},
|
||||
|
|
|
@@ -56,7 +56,20 @@ func TestAccLBV1Pool_fullstack(t *testing.T) {
|
|||
CheckDestroy: testAccCheckLBV1PoolDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccLBV1Pool_fullstack,
|
||||
Config: testAccLBV1Pool_fullstack_1,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckNetworkingV2NetworkExists(t, "openstack_networking_network_v2.network_1", &network),
|
||||
testAccCheckNetworkingV2SubnetExists(t, "openstack_networking_subnet_v2.subnet_1", &subnet),
|
||||
testAccCheckComputeV2SecGroupExists(t, "openstack_compute_secgroup_v2.secgroup_1", &secgroup),
|
||||
testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.instance_1", &instance1),
|
||||
testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.instance_2", &instance2),
|
||||
testAccCheckLBV1PoolExists(t, "openstack_lb_pool_v1.pool_1", &pool),
|
||||
testAccCheckLBV1MonitorExists(t, "openstack_lb_monitor_v1.monitor_1", &monitor),
|
||||
testAccCheckLBV1VIPExists(t, "openstack_lb_vip_v1.vip_1", &vip),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
Config: testAccLBV1Pool_fullstack_2,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckNetworkingV2NetworkExists(t, "openstack_networking_network_v2.network_1", &network),
|
||||
testAccCheckNetworkingV2SubnetExists(t, "openstack_networking_subnet_v2.subnet_1", &subnet),
|
||||
|
@@ -172,7 +185,7 @@ var testAccLBV1Pool_update = fmt.Sprintf(`
|
|||
}`,
|
||||
OS_REGION_NAME, OS_REGION_NAME, OS_REGION_NAME)
|
||||
|
||||
var testAccLBV1Pool_fullstack = fmt.Sprintf(`
|
||||
var testAccLBV1Pool_fullstack_1 = fmt.Sprintf(`
|
||||
resource "openstack_networking_network_v2" "network_1" {
|
||||
name = "network_1"
|
||||
admin_state_up = "true"
|
||||
|
@@ -257,3 +270,90 @@ var testAccLBV1Pool_fullstack = fmt.Sprintf(`
|
|||
pool_id = "${openstack_lb_pool_v1.pool_1.id}"
|
||||
admin_state_up = true
|
||||
}`)
|
||||
|
||||
// testAccLBV1Pool_fullstack_2 is the second step of the full-stack acceptance
// test. Relative to step 1 it adds user_data to instance_2, which — per the
// commit intent — forces that instance (and the member referencing it) to be
// rebuilt while the pool keeps its monitor association.
//
// NOTE: the original wrapped this constant template in fmt.Sprintf with no
// arguments and no format verbs (staticcheck S1039); the needless call is
// removed, leaving the value unchanged.
var testAccLBV1Pool_fullstack_2 = `
resource "openstack_networking_network_v2" "network_1" {
  name = "network_1"
  admin_state_up = "true"
}

resource "openstack_networking_subnet_v2" "subnet_1" {
  network_id = "${openstack_networking_network_v2.network_1.id}"
  cidr = "192.168.199.0/24"
  ip_version = 4
}

resource "openstack_compute_secgroup_v2" "secgroup_1" {
  name = "secgroup_1"
  description = "Rules for secgroup_1"

  rule {
    from_port = -1
    to_port = -1
    ip_protocol = "icmp"
    cidr = "0.0.0.0/0"
  }

  rule {
    from_port = 80
    to_port = 80
    ip_protocol = "tcp"
    cidr = "0.0.0.0/0"
  }
}

resource "openstack_compute_instance_v2" "instance_1" {
  name = "instance_1"
  security_groups = ["default", "${openstack_compute_secgroup_v2.secgroup_1.name}"]
  network {
    uuid = "${openstack_networking_network_v2.network_1.id}"
  }
}

resource "openstack_compute_instance_v2" "instance_2" {
  name = "instance_2"
  security_groups = ["default", "${openstack_compute_secgroup_v2.secgroup_1.name}"]
  user_data = "#cloud-config\ndisable_root: false"
  network {
    uuid = "${openstack_networking_network_v2.network_1.id}"
  }
}

resource "openstack_lb_monitor_v1" "monitor_1" {
  type = "TCP"
  delay = 30
  timeout = 5
  max_retries = 3
  admin_state_up = "true"
}

resource "openstack_lb_pool_v1" "pool_1" {
  name = "pool_1"
  protocol = "TCP"
  subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}"
  lb_method = "ROUND_ROBIN"
  monitor_ids = ["${openstack_lb_monitor_v1.monitor_1.id}"]
}

resource "openstack_lb_member_v1" "member_1" {
  pool_id = "${openstack_lb_pool_v1.pool_1.id}"
  address = "${openstack_compute_instance_v2.instance_1.access_ip_v4}"
  port = 80
  admin_state_up = true
}

resource "openstack_lb_member_v1" "member_2" {
  pool_id = "${openstack_lb_pool_v1.pool_1.id}"
  address = "${openstack_compute_instance_v2.instance_2.access_ip_v4}"
  port = 80
  admin_state_up = true
}

resource "openstack_lb_vip_v1" "vip_1" {
  name = "vip_1"
  subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}"
  protocol = "TCP"
  port = 80
  pool_id = "${openstack_lb_pool_v1.pool_1.id}"
  admin_state_up = true
}`
|
||||
|
|
Loading…
Reference in New Issue