Added support for disk init types
Fixed formatting. Changed 'init_type' to 'type'. Fixed acceptance tests for disk provisioning. Fixed docs for the change from 'init_type' to 'type'.
parent 81cf5294cc
commit 6ca1327aa2
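For reference, the user-facing effect of the rename is that a `disk` block now sets `type` instead of `init_type`. A minimal sketch (the values and the default are as introduced in this commit; everything else is unchanged):

    disk {
        size = 1
        iops = 500
        type = "thin"    # previously: init_type = "thin"; defaults to "eager_zeroed" when omitted
    }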
@@ -136,8 +136,8 @@ func testAccCheckVSphereFolderDestroy(s *terraform.State) error {
 			return fmt.Errorf("error %s", err)
 		}
 
-		_, err = object.NewSearchIndex(client.Client).FindChild(context.TODO(), dcFolders.VmFolder, rs.Primary.Attributes["path"])
-		if err == nil {
+		f, err := object.NewSearchIndex(client.Client).FindChild(context.TODO(), dcFolders.VmFolder, rs.Primary.Attributes["path"])
+		if f != nil {
 			return fmt.Errorf("Record still exists")
 		}
 	}
@@ -40,6 +40,7 @@ type networkInterface struct {
 type hardDisk struct {
 	size     int64
 	iops     int64
+	initType string
 }
 
 type virtualMachine struct {
@@ -235,6 +236,21 @@ func resourceVSphereVirtualMachine() *schema.Resource {
 						ForceNew: true,
 					},
 
+					"type": &schema.Schema{
+						Type:     schema.TypeString,
+						Optional: true,
+						ForceNew: true,
+						Default:  "eager_zeroed",
+						ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
+							value := v.(string)
+							if value != "thin" && value != "eager_zeroed" {
+								errors = append(errors, fmt.Errorf(
+									"only 'thin' and 'eager_zeroed' are supported values for 'type'"))
+							}
+							return
+						},
+					},
+
 					"datastore": &schema.Schema{
 						Type:     schema.TypeString,
 						Optional: true,
@@ -381,10 +397,14 @@ func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{
 				} else {
 					return fmt.Errorf("Size argument is required.")
 				}
+
 			}
 			if v, ok := disk["iops"].(int); ok && v != 0 {
 				disks[i].iops = int64(v)
 			}
+			if v, ok := disk["type"].(string); ok && v != "" {
+				disks[i].initType = v
+			}
 		}
 		vm.hardDisks = disks
 		log.Printf("[DEBUG] disk init: %v", disks)
@@ -678,7 +698,7 @@ func buildNetworkDevice(f *find.Finder, label, adapterType string) (*types.Virtu
 }
 
 // buildVMRelocateSpec builds VirtualMachineRelocateSpec to set a place for a new VirtualMachine.
-func buildVMRelocateSpec(rp *object.ResourcePool, ds *object.Datastore, vm *object.VirtualMachine) (types.VirtualMachineRelocateSpec, error) {
+func buildVMRelocateSpec(rp *object.ResourcePool, ds *object.Datastore, vm *object.VirtualMachine, initType string) (types.VirtualMachineRelocateSpec, error) {
 	var key int
 
 	devices, err := vm.Device(context.TODO())
@@ -691,6 +711,7 @@ func buildVMRelocateSpec(rp *object.ResourcePool, ds *object.Datastore, vm *obje
 		}
 	}
 
+	isThin := initType == "thin"
 	rpr := rp.Reference()
 	dsr := ds.Reference()
 	return types.VirtualMachineRelocateSpec{
@@ -701,8 +722,8 @@ func buildVMRelocateSpec(rp *object.ResourcePool, ds *object.Datastore, vm *obje
 			Datastore: dsr,
 			DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{
 				DiskMode:        "persistent",
-				ThinProvisioned: types.NewBool(false),
-				EagerlyScrub:    types.NewBool(true),
+				ThinProvisioned: types.NewBool(isThin),
+				EagerlyScrub:    types.NewBool(!isThin),
 			},
 			DiskId: key,
 		},
@@ -1057,6 +1078,7 @@ func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error {
 			object.NewFolder(c.Client, d),
 		}
 		sps := buildStoragePlacementSpecClone(c, dcFolders, template, resourcePool, sp)
+
 		datastore, err = findDatastore(c, sps)
 		if err != nil {
 			return err
@@ -1068,10 +1090,11 @@ func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error {
 	}
 	log.Printf("[DEBUG] datastore: %#v", datastore)
 
-	relocateSpec, err := buildVMRelocateSpec(resourcePool, datastore, template)
+	relocateSpec, err := buildVMRelocateSpec(resourcePool, datastore, template, vm.hardDisks[0].initType)
 	if err != nil {
 		return err
 	}
+
 	log.Printf("[DEBUG] relocate spec: %v", relocateSpec)
 
 	// network
@@ -1226,7 +1249,7 @@ func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error {
 	log.Printf("[DEBUG]VM customization finished")
 
 	for i := 1; i < len(vm.hardDisks); i++ {
-		err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, "eager_zeroed")
+		err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, vm.hardDisks[i].initType)
 		if err != nil {
 			return err
 		}
@@ -75,6 +75,69 @@ func TestAccVSphereVirtualMachine_basic(t *testing.T) {
 	})
 }
 
+func TestAccVSphereVirtualMachine_diskInitType(t *testing.T) {
+	var vm virtualMachine
+	var locationOpt string
+	var datastoreOpt string
+
+	if v := os.Getenv("VSPHERE_DATACENTER"); v != "" {
+		locationOpt += fmt.Sprintf("    datacenter = \"%s\"\n", v)
+	}
+	if v := os.Getenv("VSPHERE_CLUSTER"); v != "" {
+		locationOpt += fmt.Sprintf("    cluster = \"%s\"\n", v)
+	}
+	if v := os.Getenv("VSPHERE_RESOURCE_POOL"); v != "" {
+		locationOpt += fmt.Sprintf("    resource_pool = \"%s\"\n", v)
+	}
+	if v := os.Getenv("VSPHERE_DATASTORE"); v != "" {
+		datastoreOpt = fmt.Sprintf("    datastore = \"%s\"\n", v)
+	}
+	template := os.Getenv("VSPHERE_TEMPLATE")
+	gateway := os.Getenv("VSPHERE_NETWORK_GATEWAY")
+	label := os.Getenv("VSPHERE_NETWORK_LABEL")
+	ip_address := os.Getenv("VSPHERE_NETWORK_IP_ADDRESS")
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckVSphereVirtualMachineDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: fmt.Sprintf(
+					testAccCheckVSphereVirtualMachineConfig_initType,
+					locationOpt,
+					gateway,
+					label,
+					ip_address,
+					datastoreOpt,
+					template,
+				),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckVSphereVirtualMachineExists("vsphere_virtual_machine.thin", &vm),
+					resource.TestCheckResourceAttr(
+						"vsphere_virtual_machine.thin", "name", "terraform-test"),
+					resource.TestCheckResourceAttr(
+						"vsphere_virtual_machine.thin", "vcpu", "2"),
+					resource.TestCheckResourceAttr(
+						"vsphere_virtual_machine.thin", "memory", "4096"),
+					resource.TestCheckResourceAttr(
+						"vsphere_virtual_machine.thin", "disk.#", "2"),
+					resource.TestCheckResourceAttr(
+						"vsphere_virtual_machine.thin", "disk.0.template", template),
+					resource.TestCheckResourceAttr(
+						"vsphere_virtual_machine.thin", "disk.0.type", "thin"),
+					resource.TestCheckResourceAttr(
+						"vsphere_virtual_machine.thin", "disk.1.type", "eager_zeroed"),
+					resource.TestCheckResourceAttr(
+						"vsphere_virtual_machine.thin", "network_interface.#", "1"),
+					resource.TestCheckResourceAttr(
+						"vsphere_virtual_machine.thin", "network_interface.0.label", label),
+				),
+			},
+		},
+	})
+}
+
 func TestAccVSphereVirtualMachine_dhcp(t *testing.T) {
 	var vm virtualMachine
 	var locationOpt string
@@ -357,9 +420,9 @@ func testAccCheckVSphereVirtualMachineDestroy(s *terraform.State) error {
 			}
 		}
 
-		_, err = object.NewSearchIndex(client.Client).FindChild(context.TODO(), folder, rs.Primary.Attributes["name"])
+		v, err := object.NewSearchIndex(client.Client).FindChild(context.TODO(), folder, rs.Primary.Attributes["name"])
 
-		if err == nil {
+		if v != nil {
 			return fmt.Errorf("Record still exists")
 		}
 	}
@@ -525,6 +588,30 @@ resource "vsphere_virtual_machine" "foo" {
     }
 }
 `
+const testAccCheckVSphereVirtualMachineConfig_initType = `
+resource "vsphere_virtual_machine" "thin" {
+    name = "terraform-test"
+%s
+    vcpu = 2
+    memory = 4096
+    gateway = "%s"
+    network_interface {
+        label = "%s"
+        ipv4_address = "%s"
+        ipv4_prefix_length = 24
+    }
+    disk {
+%s
+        template = "%s"
+        iops = 500
+        type = "thin"
+    }
+    disk {
+        size = 1
+        iops = 500
+    }
+}
+`
 const testAccCheckVSphereVirtualMachineConfig_dhcp = `
 resource "vsphere_virtual_machine" "bar" {
     name = "terraform-test"
@@ -68,6 +68,7 @@ The `disk` block supports:
 * `datastore` - (Optional) Datastore for this disk
 * `size` - (Required if template not provided) Size of this disk (in GB).
 * `iops` - (Optional) Number of virtual iops to allocate for this disk.
+* `type` - (Optional) 'eager_zeroed' (the default) or 'thin' are supported options.
 
 ## Attributes Reference
 
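As the `buildVMRelocateSpec` change above shows, the `type` of the first `disk` entry controls how the cloned template disk is provisioned, while each additional disk is created via `addHardDisk` with its own `type`. A hedged configuration sketch exercising both supported values (template name is a placeholder; network settings omitted for brevity):

    resource "vsphere_virtual_machine" "mixed" {
        name   = "terraform-test"
        vcpu   = 2
        memory = 4096
        disk {
            template = "base-template"    # placeholder; this disk's type applies to the cloned template disk
            type     = "thin"
        }
        disk {
            size = 1
            iops = 500
            type = "eager_zeroed"         # explicit here, but also the default
        }
    }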