diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
index 3fc317971..8b984b575 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
@@ -447,6 +447,12 @@ func resourceVSphereVirtualMachine() *schema.Resource {
 				},
 			},
 
+			"detach_unknown_disks_on_delete": &schema.Schema{
+				Type:     schema.TypeBool,
+				Optional: true,
+				Default:  false,
+			},
+
 			"cdrom": &schema.Schema{
 				Type:     schema.TypeList,
 				Optional: true,
@@ -1149,10 +1155,11 @@ func resourceVSphereVirtualMachineDelete(d *schema.ResourceData, meta interface{
 	}
 
 	// Safely eject any disks the user marked as keep_on_remove
+	var diskSetList []interface{}
 	if vL, ok := d.GetOk("disk"); ok {
 		if diskSet, ok := vL.(*schema.Set); ok {
-
-			for _, value := range diskSet.List() {
+			diskSetList = diskSet.List()
+			for _, value := range diskSetList {
 				disk := value.(map[string]interface{})
 
 				if v, ok := disk["keep_on_remove"].(bool); ok && v == true {
@@ -1168,6 +1175,36 @@ func resourceVSphereVirtualMachineDelete(d *schema.ResourceData, meta interface{
 		}
 	}
 
+	// Safely eject any disks that are not managed by this resource
+	if v, ok := d.GetOk("detach_unknown_disks_on_delete"); ok && v.(bool) {
+		var disksToRemove object.VirtualDeviceList
+		for _, device := range devices {
+			if devices.TypeName(device) != "VirtualDisk" {
+				continue
+			}
+			vd := device.GetVirtualDevice()
+			var skip bool
+			for _, value := range diskSetList {
+				disk := value.(map[string]interface{})
+				if int32(disk["key"].(int)) == vd.Key {
+					skip = true
+					break
+				}
+			}
+			if skip {
+				continue
+			}
+			disksToRemove = append(disksToRemove, device)
+		}
+		if len(disksToRemove) != 0 {
+			err = vm.RemoveDevice(context.TODO(), true, disksToRemove...)
+			if err != nil {
+				log.Printf("[ERROR] Delete - Error removing disk: %v", err)
+				return err
+			}
+		}
+	}
+
 	task, err := vm.Destroy(context.TODO())
 	if err != nil {
 		return err
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
index 8795c2a8f..8f9ef96ae 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
@@ -214,7 +214,7 @@ type TestFuncData struct {
 func (test TestFuncData) testCheckFuncBasic() (
 	resource.TestCheckFunc, resource.TestCheckFunc, resource.TestCheckFunc, resource.TestCheckFunc,
 	resource.TestCheckFunc, resource.TestCheckFunc, resource.TestCheckFunc, resource.TestCheckFunc) {
-	// log.Printf("[DEBUG] data= %v", test)
+	//log.Printf("[DEBUG] data= %v", test)
 	mem := test.mem
 	if mem == "" {
 		mem = "1024"
 	}
@@ -1349,9 +1349,9 @@ resource "vsphere_virtual_machine" "keep_disk" {
     disk {
         size = 1
         iops = 500
-    controller_type = "scsi"
-    name = "one"
-    keep_on_remove = true
+        controller_type = "scsi"
+        name = "one"
+        keep_on_remove = true
     }
 }
 `
@@ -1389,13 +1389,137 @@ func TestAccVSphereVirtualMachine_keepOnRemove(t *testing.T) {
 			},
 			resource.TestStep{
 				Config: " ",
-				Check:  checkForDisk(datacenter, datastore, "terraform-test", "one.vmdk"),
+				Check:  checkForDisk(datacenter, datastore, "terraform-test", "one.vmdk", true, true),
 			},
 		},
 	})
 }
 
-func checkForDisk(datacenter string, datastore string, vmName string, path string) resource.TestCheckFunc {
+const testAccVSphereVirtualMachine_DetachUnknownDisks = `
+resource "vsphere_virtual_machine" "detach_unknown_disks" {
+    name = "terraform-test"
+` + testAccTemplateBasicBody + `
+    detach_unknown_disks_on_delete = true
+    disk {
+        size = 1
+        iops = 500
+        controller_type = "scsi"
+        name = "one"
+        keep_on_remove = true
+    }
+    disk {
+        size = 2
+        iops = 500
+        controller_type = "scsi"
+        name = "two"
+        keep_on_remove = false
+    }
+    disk {
+        size = 3
+        iops = 500
+        controller_type = "scsi"
+        name = "three"
+        keep_on_remove = true
+    }
+}
+`
+
+func TestAccVSphereVirtualMachine_DetachUnknownDisks(t *testing.T) {
+	var vm virtualMachine
+	basic_vars := setupTemplateBasicBodyVars()
+	config := basic_vars.testSprintfTemplateBody(testAccVSphereVirtualMachine_DetachUnknownDisks)
+	var datastore string
+	if v := os.Getenv("VSPHERE_DATASTORE"); v != "" {
+		datastore = v
+	}
+	var datacenter string
+	if v := os.Getenv("VSPHERE_DATACENTER"); v != "" {
+		datacenter = v
+	}
+
+	vmName := "vsphere_virtual_machine.detach_unknown_disks"
+	test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label :=
+		TestFuncData{vm: vm, label: basic_vars.label, vmName: vmName, numDisks: "4"}.testCheckFuncBasic()
+
+	log.Printf("[DEBUG] template= %s", testAccVSphereVirtualMachine_DetachUnknownDisks)
+	log.Printf("[DEBUG] template config= %s", config)
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckVSphereVirtualMachineDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: config,
+				Check: resource.ComposeTestCheckFunc(
+					test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label,
+				),
+			},
+			resource.TestStep{
+				PreConfig: func() {
+					createAndAttachDisk(t, "terraform-test", 1, datastore, "terraform-test/tf_custom_disk.vmdk", "lazy", "scsi", datacenter)
+				},
+				Config: " ",
+				Check: resource.ComposeTestCheckFunc(
+					checkForDisk(datacenter, datastore, "terraform-test", "one.vmdk", true, false),
+					checkForDisk(datacenter, datastore, "terraform-test", "two.vmdk", false, false),
+					checkForDisk(datacenter, datastore, "terraform-test", "three.vmdk", true, false),
+					checkForDisk(datacenter, datastore, "terraform-test", "tf_custom_disk.vmdk", true, true),
+				),
+			},
+		},
+	})
+}
+
+func createAndAttachDisk(t *testing.T, vmName string, size int, datastore string, diskPath string, diskType string, adapterType string, datacenter string) {
+	client := testAccProvider.Meta().(*govmomi.Client)
+	finder := find.NewFinder(client.Client, true)
+
+	dc, err := finder.Datacenter(context.TODO(), datacenter)
+	if err != nil {
+		log.Printf("[ERROR] finding Datacenter %s: %v", datacenter, err)
+		t.Fail()
+		return
+	}
+	finder = finder.SetDatacenter(dc)
+	ds, err := getDatastore(finder, datastore)
+	if err != nil {
+		log.Printf("[ERROR] getDatastore %s: %v", datastore, err)
+		t.Fail()
+		return
+	}
+	vm, err := finder.VirtualMachine(context.TODO(), vmName)
+	if err != nil {
+		log.Printf("[ERROR] finding VM %s: %v", vmName, err)
+		t.Fail()
+		return
+	}
+	err = addHardDisk(vm, int64(size), int64(0), diskType, ds, diskPath, adapterType)
+	if err != nil {
+		log.Printf("[ERROR] addHardDisk: %v", err)
+		t.Fail()
+		return
+	}
+}
+
+func vmCleanup(dc *object.Datacenter, ds *object.Datastore, vmName string) error {
+	client := testAccProvider.Meta().(*govmomi.Client)
+	fileManager := object.NewFileManager(client.Client)
+	task, err := fileManager.DeleteDatastoreFile(context.TODO(), ds.Path(vmName), dc)
+	if err != nil {
+		log.Printf("[ERROR] vmCleanup - Couldn't delete vm folder '%v': %v", vmName, err)
+		return err
+	}
+
+	_, err = task.WaitForResult(context.TODO(), nil)
+	if err != nil {
+		log.Printf("[ERROR] vmCleanup - Failed while deleting vm folder '%v': %v", vmName, err)
+		return err
+	}
+	return nil
+}
+
+func checkForDisk(datacenter string, datastore string, vmName string, path string, exists bool, cleanup bool) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
 		client := testAccProvider.Meta().(*govmomi.Client)
 		finder := find.NewFinder(client.Client, true)
@@ -1415,23 +1539,25 @@ func checkForDisk(datacenter string, datastore string, vmName string, path strin
 		diskPath := vmName + "/" + path
 
 		_, err = ds.Stat(context.TODO(), diskPath)
-		if err != nil {
+		if err != nil && exists {
 			log.Printf("[ERROR] checkForDisk - Couldn't stat file '%v': %v", diskPath, err)
 			return err
+		} else if err == nil && !exists {
+			errorMessage := fmt.Sprintf("checkForDisk - disk %s still exists", diskPath)
+			err = vmCleanup(dc, ds, vmName)
+			if err != nil {
+				return fmt.Errorf("[ERROR] %s, cleanup also failed: %v", errorMessage, err)
+			}
+			return fmt.Errorf("[ERROR] %s", errorMessage)
 		}
 
-		// Cleanup
-		fileManager := object.NewFileManager(client.Client)
-		task, err := fileManager.DeleteDatastoreFile(context.TODO(), ds.Path(vmName), dc)
-		if err != nil {
-			log.Printf("[ERROR] checkForDisk - Couldn't delete vm folder '%v': %v", vmName, err)
-			return err
+		if !cleanup || !exists {
+			return nil
 		}
 
-		_, err = task.WaitForResult(context.TODO(), nil)
+		err = vmCleanup(dc, ds, vmName)
 		if err != nil {
-			log.Printf("[ERROR] checForDisk - Failed while deleting vm folder '%v': %v", vmName, err)
-			return err
+			return fmt.Errorf("[ERROR] cleanup failed: %v", err)
 		}
 
 		return nil
diff --git a/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown b/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown
index 1ad81742a..f1bfbc91f 100644
--- a/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown
+++ b/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown
@@ -75,6 +75,7 @@ The following arguments are supported:
 * `dns_servers` - (Optional) List of DNS servers for the virtual network adapter; defaults to 8.8.8.8, 8.8.4.4
 * `network_interface` - (Required) Configures virtual network interfaces; see [Network Interfaces](#network-interfaces) below for details.
 * `disk` - (Required) Configures virtual disks; see [Disks](#disks) below for details
+* `detach_unknown_disks_on_delete` - (Optional) Detach disks that are not managed by this resource before deleting the virtual machine, so that disks attached outside of Terraform after the resource was created are not destroyed along with it. Defaults to `false`.
 * `cdrom` - (Optional) Configures a CDROM device and mounts an image as its media; see [CDROM](#cdrom) below for more details.
 * `windows_opt_config` - (Optional) Extra options for clones of Windows machines.
 * `linked_clone` - (Optional) Specifies if the new machine is a [linked clone](https://www.vmware.com/support/ws5/doc/ws_clone_overview.html#wp1036396) of another machine or not.
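
For reference, a minimal configuration sketch of how the new argument is meant to be used. The `datacenter`, `vcpu`, `memory`, network label, and template values below are illustrative placeholders, not values taken from the acceptance tests:

```hcl
# Sketch only: a VM that detaches (rather than deletes) disks attached to it
# outside of Terraform when the resource is destroyed.
resource "vsphere_virtual_machine" "example" {
  name       = "terraform-example"
  datacenter = "dc1"    # placeholder
  vcpu       = 2
  memory     = 4096

  # Disks added to the VM after creation and not declared in this resource
  # (for example by an operator or a backup appliance) are detached before
  # the VM is destroyed instead of being removed with it.
  detach_unknown_disks_on_delete = true

  network_interface {
    label = "VM Network"        # placeholder
  }

  disk {
    template = "centos-7-base"  # placeholder template name
  }
}
```

On `terraform destroy`, a disk attached to the VM by hand survives as its own VMDK, which is what the `tf_custom_disk.vmdk` check in the acceptance test above asserts.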