provider/openstack: Volume Cleanup

This commit cleans up the volume and block device handling in the instance
resource. It also adds more acceptance tests to deal with different workflows
of attaching and detaching a volume through the instance's lifecycle.

No new functionality has been added.
This commit is contained in:
Joe Topjian 2015-09-12 17:56:38 +00:00
parent e75553fd9d
commit 3d3f8122a9
3 changed files with 291 additions and 133 deletions

View File

@ -8,6 +8,7 @@ import (
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
"github.com/rackspace/gophercloud"
"github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes" "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes"
) )
@ -106,6 +107,30 @@ func testAccCheckBlockStorageV1VolumeExists(t *testing.T, n string, volume *volu
} }
} }
// testAccCheckBlockStorageV1VolumeDoesNotExist verifies that the given volume
// can no longer be retrieved via the Block Storage v1 API (i.e. it has been
// deleted). A 404 from the API counts as success; any other error is
// propagated, and a successful lookup means the volume still exists.
func testAccCheckBlockStorageV1VolumeDoesNotExist(t *testing.T, n string, volume *volumes.Volume) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		config := testAccProvider.Meta().(*Config)
		blockStorageClient, err := config.blockStorageV1Client(OS_REGION_NAME)
		if err != nil {
			return fmt.Errorf("Error creating OpenStack block storage client: %s", err)
		}

		if _, err = volumes.Get(blockStorageClient, volume.ID).Extract(); err == nil {
			// The lookup succeeded, so the volume was not deleted.
			return fmt.Errorf("Volume still exists")
		}

		// A 404 response means the volume is gone, which is the desired state.
		if errCode, ok := err.(*gophercloud.UnexpectedResponseCodeError); ok && errCode.Actual == 404 {
			return nil
		}
		return err
	}
}
func testAccCheckBlockStorageV1VolumeMetadata( func testAccCheckBlockStorageV1VolumeMetadata(
volume *volumes.Volume, k string, v string) resource.TestCheckFunc { volume *volumes.Volume, k string, v string) resource.TestCheckFunc {
return func(s *terraform.State) error { return func(s *terraform.State) error {

View File

@ -176,6 +176,11 @@ func resourceComputeInstanceV2() *schema.Resource {
ForceNew: true, ForceNew: true,
}, },
"block_device": &schema.Schema{ "block_device": &schema.Schema{
// TODO: This is a set because we don't support singleton
// sub-resources today. We'll enforce that the set only ever has
// length zero or one below. When TF gains support for
// sub-resources this can be converted.
// As referenced in resource_aws_instance.go
Type: schema.TypeSet, Type: schema.TypeSet,
Optional: true, Optional: true,
ForceNew: true, ForceNew: true,
@ -307,6 +312,13 @@ func resourceComputeInstanceV2Create(d *schema.ResourceData, meta interface{}) e
return err return err
} }
// determine if volume/block_device configuration is correct
// this includes ensuring volume_ids are set
// and if only one block_device was specified.
if err := checkVolumeConfig(d); err != nil {
return err
}
networks := make([]servers.Network, len(networkDetails)) networks := make([]servers.Network, len(networkDetails))
for i, net := range networkDetails { for i, net := range networkDetails {
networks[i] = servers.Network{ networks[i] = servers.Network{
@ -338,9 +350,6 @@ func resourceComputeInstanceV2Create(d *schema.ResourceData, meta interface{}) e
if v, ok := d.GetOk("block_device"); ok { if v, ok := d.GetOk("block_device"); ok {
vL := v.(*schema.Set).List() vL := v.(*schema.Set).List()
if len(vL) > 1 {
return fmt.Errorf("Can only specify one block device to boot from.")
}
for _, v := range vL { for _, v := range vL {
blockDeviceRaw := v.(map[string]interface{}) blockDeviceRaw := v.(map[string]interface{})
blockDevice := resourceInstanceBlockDeviceV2(d, blockDeviceRaw) blockDevice := resourceInstanceBlockDeviceV2(d, blockDeviceRaw)
@ -362,23 +371,6 @@ func resourceComputeInstanceV2Create(d *schema.ResourceData, meta interface{}) e
} }
} }
// Boot From Volume makes the root volume/disk appear as an attached volume.
// Because of that, and in order to accurately report volume status, the volume_id
// of the "volume" parameter must be computed and optional.
// However, a volume_id, of course, is required to attach a volume. We do the check
// here to fail early (before the instance is created) if a volume_id was not specified.
if v := d.Get("volume"); v != nil {
vols := v.(*schema.Set).List()
if len(vols) > 0 {
for _, v := range vols {
va := v.(map[string]interface{})
if va["volume_id"].(string) == "" {
return fmt.Errorf("A volume_id must be specified when attaching volumes.")
}
}
}
}
log.Printf("[DEBUG] Create Options: %#v", createOpts) log.Printf("[DEBUG] Create Options: %#v", createOpts)
server, err := servers.Create(computeClient, createOpts).Extract() server, err := servers.Create(computeClient, createOpts).Extract()
if err != nil { if err != nil {
@ -417,17 +409,14 @@ func resourceComputeInstanceV2Create(d *schema.ResourceData, meta interface{}) e
} }
} }
// were volume attachments specified? // if volumes were specified, attach them after the instance has launched.
if v := d.Get("volume"); v != nil { if v, ok := d.GetOk("volume"); ok {
vols := v.(*schema.Set).List() vols := v.(*schema.Set).List()
if len(vols) > 0 { if blockClient, err := config.blockStorageV1Client(d.Get("region").(string)); err != nil {
if blockClient, err := config.blockStorageV1Client(d.Get("region").(string)); err != nil { return fmt.Errorf("Error creating OpenStack block storage client: %s", err)
return fmt.Errorf("Error creating OpenStack block storage client: %s", err) } else {
} else { if err := attachVolumesToInstance(computeClient, blockClient, d.Id(), vols); err != nil {
return err
if err := attachVolumesToInstance(computeClient, blockClient, d.Id(), vols); err != nil {
return err
}
} }
} }
} }
@ -578,21 +567,9 @@ func resourceComputeInstanceV2Read(d *schema.ResourceData, meta interface{}) err
d.Set("image_name", image.Name) d.Set("image_name", image.Name)
// volume attachments // volume attachments
vas, err := getVolumeAttachments(computeClient, d.Id()) if err := getVolumeAttachments(computeClient, d); err != nil {
if err != nil {
return err return err
} }
if len(vas) > 0 {
attachments := make([]map[string]interface{}, len(vas))
for i, attachment := range vas {
attachments[i] = make(map[string]interface{})
attachments[i]["id"] = attachment.ID
attachments[i]["volume_id"] = attachment.VolumeID
attachments[i]["device"] = attachment.Device
}
log.Printf("[INFO] Volume attachments: %v", attachments)
d.Set("volume", attachments)
}
return nil return nil
} }
@ -702,30 +679,31 @@ func resourceComputeInstanceV2Update(d *schema.ResourceData, meta interface{}) e
} }
if d.HasChange("volume") { if d.HasChange("volume") {
// ensure the volume configuration is correct
if err := checkVolumeConfig(d); err != nil {
return err
}
// old attachments and new attachments // old attachments and new attachments
oldAttachments, newAttachments := d.GetChange("volume") oldAttachments, newAttachments := d.GetChange("volume")
// for each old attachment, detach the volume // for each old attachment, detach the volume
oldAttachmentSet := oldAttachments.(*schema.Set).List() oldAttachmentSet := oldAttachments.(*schema.Set).List()
if len(oldAttachmentSet) > 0 { if blockClient, err := config.blockStorageV1Client(d.Get("region").(string)); err != nil {
if blockClient, err := config.blockStorageV1Client(d.Get("region").(string)); err != nil { return err
} else {
if err := detachVolumesFromInstance(computeClient, blockClient, d.Id(), oldAttachmentSet); err != nil {
return err return err
} else {
if err := detachVolumesFromInstance(computeClient, blockClient, d.Id(), oldAttachmentSet); err != nil {
return err
}
} }
} }
// for each new attachment, attach the volume // for each new attachment, attach the volume
newAttachmentSet := newAttachments.(*schema.Set).List() newAttachmentSet := newAttachments.(*schema.Set).List()
if len(newAttachmentSet) > 0 { if blockClient, err := config.blockStorageV1Client(d.Get("region").(string)); err != nil {
if blockClient, err := config.blockStorageV1Client(d.Get("region").(string)); err != nil { return err
} else {
if err := attachVolumesToInstance(computeClient, blockClient, d.Id(), newAttachmentSet); err != nil {
return err return err
} else {
if err := attachVolumesToInstance(computeClient, blockClient, d.Id(), newAttachmentSet); err != nil {
return err
}
} }
} }
@ -1112,81 +1090,78 @@ func resourceComputeSchedulerHintsHash(v interface{}) int {
} }
func attachVolumesToInstance(computeClient *gophercloud.ServiceClient, blockClient *gophercloud.ServiceClient, serverId string, vols []interface{}) error { func attachVolumesToInstance(computeClient *gophercloud.ServiceClient, blockClient *gophercloud.ServiceClient, serverId string, vols []interface{}) error {
if len(vols) > 0 { for _, v := range vols {
for _, v := range vols { va := v.(map[string]interface{})
va := v.(map[string]interface{}) volumeId := va["volume_id"].(string)
volumeId := va["volume_id"].(string) device := va["device"].(string)
device := va["device"].(string)
s := "" s := ""
if serverId != "" { if serverId != "" {
s = serverId s = serverId
} else if va["server_id"] != "" { } else if va["server_id"] != "" {
s = va["server_id"].(string) s = va["server_id"].(string)
} else { } else {
return fmt.Errorf("Unable to determine server ID to attach volume.") return fmt.Errorf("Unable to determine server ID to attach volume.")
}
vaOpts := &volumeattach.CreateOpts{
Device: device,
VolumeID: volumeId,
}
if _, err := volumeattach.Create(computeClient, s, vaOpts).Extract(); err != nil {
return err
}
stateConf := &resource.StateChangeConf{
Pending: []string{"attaching", "available"},
Target: "in-use",
Refresh: VolumeV1StateRefreshFunc(blockClient, va["volume_id"].(string)),
Timeout: 30 * time.Minute,
Delay: 5 * time.Second,
MinTimeout: 2 * time.Second,
}
if _, err := stateConf.WaitForState(); err != nil {
return err
}
log.Printf("[INFO] Attached volume %s to instance %s", volumeId, serverId)
} }
vaOpts := &volumeattach.CreateOpts{
Device: device,
VolumeID: volumeId,
}
if _, err := volumeattach.Create(computeClient, s, vaOpts).Extract(); err != nil {
return err
}
stateConf := &resource.StateChangeConf{
Pending: []string{"attaching", "available"},
Target: "in-use",
Refresh: VolumeV1StateRefreshFunc(blockClient, va["volume_id"].(string)),
Timeout: 30 * time.Minute,
Delay: 5 * time.Second,
MinTimeout: 2 * time.Second,
}
if _, err := stateConf.WaitForState(); err != nil {
return err
}
log.Printf("[INFO] Attached volume %s to instance %s", volumeId, serverId)
} }
return nil return nil
} }
func detachVolumesFromInstance(computeClient *gophercloud.ServiceClient, blockClient *gophercloud.ServiceClient, serverId string, vols []interface{}) error { func detachVolumesFromInstance(computeClient *gophercloud.ServiceClient, blockClient *gophercloud.ServiceClient, serverId string, vols []interface{}) error {
if len(vols) > 0 { for _, v := range vols {
for _, v := range vols { va := v.(map[string]interface{})
va := v.(map[string]interface{}) aId := va["id"].(string)
aId := va["id"].(string)
if err := volumeattach.Delete(computeClient, serverId, aId).ExtractErr(); err != nil { if err := volumeattach.Delete(computeClient, serverId, aId).ExtractErr(); err != nil {
return err return err
}
stateConf := &resource.StateChangeConf{
Pending: []string{"detaching", "in-use"},
Target: "available",
Refresh: VolumeV1StateRefreshFunc(blockClient, va["volume_id"].(string)),
Timeout: 30 * time.Minute,
Delay: 5 * time.Second,
MinTimeout: 2 * time.Second,
}
if _, err := stateConf.WaitForState(); err != nil {
return err
}
log.Printf("[INFO] Detached volume %s from instance %s", va["volume_id"], serverId)
} }
stateConf := &resource.StateChangeConf{
Pending: []string{"detaching", "in-use"},
Target: "available",
Refresh: VolumeV1StateRefreshFunc(blockClient, va["volume_id"].(string)),
Timeout: 30 * time.Minute,
Delay: 5 * time.Second,
MinTimeout: 2 * time.Second,
}
if _, err := stateConf.WaitForState(); err != nil {
return err
}
log.Printf("[INFO] Detached volume %s from instance %s", va["volume_id"], serverId)
} }
return nil return nil
} }
func getVolumeAttachments(computeClient *gophercloud.ServiceClient, serverId string) ([]volumeattach.VolumeAttachment, error) { func getVolumeAttachments(computeClient *gophercloud.ServiceClient, d *schema.ResourceData) error {
var attachments []volumeattach.VolumeAttachment var attachments []volumeattach.VolumeAttachment
err := volumeattach.List(computeClient, serverId).EachPage(func(page pagination.Page) (bool, error) {
err := volumeattach.List(computeClient, d.Id()).EachPage(func(page pagination.Page) (bool, error) {
actual, err := volumeattach.ExtractVolumeAttachments(page) actual, err := volumeattach.ExtractVolumeAttachments(page)
if err != nil { if err != nil {
return false, err return false, err
@ -1197,8 +1172,45 @@ func getVolumeAttachments(computeClient *gophercloud.ServiceClient, serverId str
}) })
if err != nil { if err != nil {
return nil, err return err
} }
return attachments, nil vols := make([]map[string]interface{}, len(attachments))
for i, attachment := range attachments {
vols[i] = make(map[string]interface{})
vols[i]["id"] = attachment.ID
vols[i]["volume_id"] = attachment.VolumeID
vols[i]["device"] = attachment.Device
}
log.Printf("[INFO] Volume attachments: %v", vols)
d.Set("volume", vols)
return nil
}
// checkVolumeConfig validates the "volume" and "block_device" attributes of
// the instance resource before any API calls are made: every volume entry
// must carry a volume_id, and at most one block_device may be specified.
func checkVolumeConfig(d *schema.ResourceData) error {
// Although a volume_id is required to attach a volume, in order to be able to report
// the attached volumes of an instance, it must be "computed" and thus "optional".
// This accounts for situations such as "boot from volume" as well as volumes being
// attached to the instance outside of Terraform.
if v := d.Get("volume"); v != nil {
vols := v.(*schema.Set).List()
if len(vols) > 0 {
for _, v := range vols {
va := v.(map[string]interface{})
// Fail early (before the instance is created) if no volume_id was given.
if va["volume_id"].(string) == "" {
return fmt.Errorf("A volume_id must be specified when attaching volumes.")
}
}
}
}
// Only a single block device to boot from is supported.
if v, ok := d.GetOk("block_device"); ok {
vL := v.(*schema.Set).List()
if len(vL) > 1 {
return fmt.Errorf("Can only specify one block device to boot from.")
}
}
return nil
} }

View File

@ -51,6 +51,20 @@ func TestAccComputeV2Instance_volumeAttach(t *testing.T) {
var instance servers.Server var instance servers.Server
var volume volumes.Volume var volume volumes.Volume
var testAccComputeV2Instance_volumeAttach = fmt.Sprintf(`
resource "openstack_blockstorage_volume_v1" "myvol" {
name = "myvol"
size = 1
}
resource "openstack_compute_instance_v2" "foo" {
name = "terraform-test"
security_groups = ["default"]
volume {
volume_id = "${openstack_blockstorage_volume_v1.myvol.id}"
}
}`)
resource.Test(t, resource.TestCase{ resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) }, PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders, Providers: testAccProviders,
@ -68,6 +82,102 @@ func TestAccComputeV2Instance_volumeAttach(t *testing.T) {
}) })
} }
// TestAccComputeV2Instance_volumeAttachPostCreation verifies that a volume
// can be attached to an already-running instance by adding a "volume" block
// in a subsequent apply.
func TestAccComputeV2Instance_volumeAttachPostCreation(t *testing.T) {
var instance servers.Server
var volume volumes.Volume
// Step 1 config: the instance alone, with no volumes attached.
var testAccComputeV2Instance_volumeAttachPostCreationInstance = fmt.Sprintf(`
resource "openstack_compute_instance_v2" "foo" {
name = "terraform-test"
security_groups = ["default"]
}`)
// Step 2 config: the same instance plus a new volume attached to it.
var testAccComputeV2Instance_volumeAttachPostCreationInstanceAndVolume = fmt.Sprintf(`
resource "openstack_blockstorage_volume_v1" "myvol" {
name = "myvol"
size = 1
}
resource "openstack_compute_instance_v2" "foo" {
name = "terraform-test"
security_groups = ["default"]
volume {
volume_id = "${openstack_blockstorage_volume_v1.myvol.id}"
}
}`)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckComputeV2InstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccComputeV2Instance_volumeAttachPostCreationInstance,
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.foo", &instance),
),
},
resource.TestStep{
Config: testAccComputeV2Instance_volumeAttachPostCreationInstanceAndVolume,
Check: resource.ComposeTestCheckFunc(
testAccCheckBlockStorageV1VolumeExists(t, "openstack_blockstorage_volume_v1.myvol", &volume),
testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.foo", &instance),
// After step 2 the volume must show up as attached to the instance.
testAccCheckComputeV2InstanceVolumeAttachment(&instance, &volume),
),
},
},
})
}
// TestAccComputeV2Instance_volumeDetachPostCreation verifies that removing the
// "volume" block from an instance's configuration detaches (and, since the
// volume resource is also removed, deletes) the volume on the next apply.
func TestAccComputeV2Instance_volumeDetachPostCreation(t *testing.T) {
var instance servers.Server
var volume volumes.Volume
// Step 1 config: an instance with a volume attached.
var testAccComputeV2Instance_volumeDetachPostCreationInstanceAndVolume = fmt.Sprintf(`
resource "openstack_blockstorage_volume_v1" "myvol" {
name = "myvol"
size = 1
}
resource "openstack_compute_instance_v2" "foo" {
name = "terraform-test"
security_groups = ["default"]
volume {
volume_id = "${openstack_blockstorage_volume_v1.myvol.id}"
}
}`)
// Step 2 config: the instance alone; the volume resource and its
// attachment have been removed from the configuration.
var testAccComputeV2Instance_volumeDetachPostCreationInstance = fmt.Sprintf(`
resource "openstack_compute_instance_v2" "foo" {
name = "terraform-test"
security_groups = ["default"]
}`)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckComputeV2InstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccComputeV2Instance_volumeDetachPostCreationInstanceAndVolume,
Check: resource.ComposeTestCheckFunc(
testAccCheckBlockStorageV1VolumeExists(t, "openstack_blockstorage_volume_v1.myvol", &volume),
testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.foo", &instance),
testAccCheckComputeV2InstanceVolumeAttachment(&instance, &volume),
),
},
resource.TestStep{
Config: testAccComputeV2Instance_volumeDetachPostCreationInstance,
Check: resource.ComposeTestCheckFunc(
// After step 2 the volume must be gone and the instance must
// have no remaining attachments.
testAccCheckBlockStorageV1VolumeDoesNotExist(t, "openstack_blockstorage_volume_v1.myvol", &volume),
testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.foo", &instance),
testAccCheckComputeV2InstanceVolumesDetached(&instance),
),
},
},
})
}
func TestAccComputeV2Instance_floatingIPAttach(t *testing.T) { func TestAccComputeV2Instance_floatingIPAttach(t *testing.T) {
var instance servers.Server var instance servers.Server
var fip floatingip.FloatingIP var fip floatingip.FloatingIP
@ -282,6 +392,33 @@ func testAccCheckComputeV2InstanceVolumeAttachment(
} }
} }
// testAccCheckComputeV2InstanceVolumesDetached verifies that the given
// instance has no volume attachments left.
func testAccCheckComputeV2InstanceVolumesDetached(instance *servers.Server) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		var attachments []volumeattach.VolumeAttachment

		config := testAccProvider.Meta().(*Config)
		computeClient, err := config.computeV2Client(OS_REGION_NAME)
		if err != nil {
			return err
		}

		// Accumulate attachments across every page. The original version
		// discarded the pagination error and kept only the last page's
		// results, which could mask both API failures and attachments.
		err = volumeattach.List(computeClient, instance.ID).EachPage(func(page pagination.Page) (bool, error) {
			actual, err := volumeattach.ExtractVolumeAttachments(page)
			if err != nil {
				return false, fmt.Errorf("Unable to lookup attachment: %s", err)
			}

			attachments = append(attachments, actual...)
			return true, nil
		})
		if err != nil {
			return err
		}

		if len(attachments) > 0 {
			return fmt.Errorf("Volumes are still attached.")
		}

		return nil
	}
}
func testAccCheckComputeV2InstanceBootVolumeAttachment( func testAccCheckComputeV2InstanceBootVolumeAttachment(
instance *servers.Server) resource.TestCheckFunc { instance *servers.Server) resource.TestCheckFunc {
return func(s *terraform.State) error { return func(s *terraform.State) error {
@ -321,19 +458,3 @@ func testAccCheckComputeV2InstanceFloatingIPAttach(
} }
} }
var testAccComputeV2Instance_volumeAttach = fmt.Sprintf(`
resource "openstack_blockstorage_volume_v1" "myvol" {
name = "myvol"
size = 1
}
resource "openstack_compute_instance_v2" "foo" {
region = "%s"
name = "terraform-test"
security_groups = ["default"]
volume {
volume_id = "${openstack_blockstorage_volume_v1.myvol.id}"
}
}`,
OS_REGION_NAME)