flattened managed disk structure in virtual machine
refactored managed disk acceptance tests
parent ac8eae9eff
commit 5d915dd4f5
@@ -19,11 +19,11 @@ func TestAccAzureRMVirtualMachine_importBasic(t *testing.T) {
         Providers:    testAccProviders,
         CheckDestroy: testCheckAzureRMVirtualMachineDestroy,
         Steps: []resource.TestStep{
-            resource.TestStep{
+            {
                 Config: config,
             },
 
-            resource.TestStep{
+            {
                 ResourceName:      resourceName,
                 ImportState:       true,
                 ImportStateVerify: true,
@@ -40,18 +40,18 @@ func TestAccAzureRMVirtualMachine_importBasic_managedDisk(t *testing.T) {
     resourceName := "azurerm_virtual_machine.test"
 
     ri := acctest.RandInt()
-    config := fmt.Sprintf(testAccAzureRMVirtualMachine_basicLinuxMachine_managedDisk, ri, ri, ri, ri, ri, ri)
+    config := fmt.Sprintf(testAccAzureRMVirtualMachine_basicLinuxMachine_managedDisk_explicit, ri, ri, ri, ri, ri, ri, ri)
 
     resource.Test(t, resource.TestCase{
         PreCheck:     func() { testAccPreCheck(t) },
         Providers:    testAccProviders,
         CheckDestroy: testCheckAzureRMVirtualMachineDestroy,
         Steps: []resource.TestStep{
-            resource.TestStep{
+            {
                 Config: config,
             },
 
-            resource.TestStep{
+            {
                 ResourceName:      resourceName,
                 ImportState:       true,
                 ImportStateVerify: true,
@@ -24,7 +24,7 @@ func TestAccAzureRMManagedDisk_empty(t *testing.T) {
             {
                 Config: config,
                 Check: resource.ComposeTestCheckFunc(
-                    testCheckAzureRMManagedDiskExists("azurerm_managed_disk.test", &d),
+                    testCheckAzureRMManagedDiskExists("azurerm_managed_disk.test", &d, true),
                 ),
             },
         },
@@ -55,7 +55,7 @@ func TestAccAzureRMManagedDisk_import(t *testing.T) {
             {
                 Config: config,
                 Check: resource.ComposeTestCheckFunc(
-                    testCheckAzureRMManagedDiskExists("azurerm_managed_disk.test", &d),
+                    testCheckAzureRMManagedDiskExists("azurerm_managed_disk.test", &d, true),
                 ),
             },
         },
@@ -74,7 +74,7 @@ func TestAccAzureRMManagedDisk_copy(t *testing.T) {
             {
                 Config: config,
                 Check: resource.ComposeTestCheckFunc(
-                    testCheckAzureRMManagedDiskExists("azurerm_managed_disk.test", &d),
+                    testCheckAzureRMManagedDiskExists("azurerm_managed_disk.test", &d, true),
                 ),
             },
         },
@@ -95,7 +95,7 @@ func TestAccAzureRMManagedDisk_update(t *testing.T) {
             {
                 Config: preConfig,
                 Check: resource.ComposeTestCheckFunc(
-                    testCheckAzureRMManagedDiskExists("azurerm_managed_disk.test", &d),
+                    testCheckAzureRMManagedDiskExists("azurerm_managed_disk.test", &d, true),
                     resource.TestCheckResourceAttr(
                         "azurerm_managed_disk.test", "tags.%", "2"),
                     resource.TestCheckResourceAttr(
@@ -103,7 +103,7 @@ func TestAccAzureRMManagedDisk_update(t *testing.T) {
                     resource.TestCheckResourceAttr(
                         "azurerm_managed_disk.test", "tags.cost-center", "ops"),
                     resource.TestCheckResourceAttr(
-                        "azurerm_managed_disk.test", "disk_size_gb", "20"),
+                        "azurerm_managed_disk.test", "disk_size_gb", "1"),
                     resource.TestCheckResourceAttr(
                         "azurerm_managed_disk.test", "storage_account_type", string(disk.StandardLRS)),
                 ),
@@ -111,13 +111,13 @@ func TestAccAzureRMManagedDisk_update(t *testing.T) {
             {
                 Config: postConfig,
                 Check: resource.ComposeTestCheckFunc(
-                    testCheckAzureRMManagedDiskExists("azurerm_managed_disk.test", &d),
+                    testCheckAzureRMManagedDiskExists("azurerm_managed_disk.test", &d, true),
                     resource.TestCheckResourceAttr(
                         "azurerm_managed_disk.test", "tags.%", "1"),
                     resource.TestCheckResourceAttr(
                         "azurerm_managed_disk.test", "tags.environment", "acctest"),
                     resource.TestCheckResourceAttr(
-                        "azurerm_managed_disk.test", "disk_size_gb", "30"),
+                        "azurerm_managed_disk.test", "disk_size_gb", "2"),
                     resource.TestCheckResourceAttr(
                         "azurerm_managed_disk.test", "storage_account_type", string(disk.PremiumLRS)),
                 ),
@@ -126,7 +126,7 @@ func TestAccAzureRMManagedDisk_update(t *testing.T) {
     })
 }
 
-func testCheckAzureRMManagedDiskExists(name string, d *disk.Model) resource.TestCheckFunc {
+func testCheckAzureRMManagedDiskExists(name string, d *disk.Model, shouldExist bool) resource.TestCheckFunc {
     return func(s *terraform.State) error {
         rs, ok := s.RootModule().Resources[name]
         if !ok {
@@ -146,9 +146,12 @@ func testCheckAzureRMManagedDiskExists(name string, d *disk.Model) resource.Test
             return fmt.Errorf("Bad: Get on diskClient: %s", err)
         }
 
-        if resp.StatusCode == http.StatusNotFound {
+        if resp.StatusCode == http.StatusNotFound && shouldExist {
             return fmt.Errorf("Bad: ManagedDisk %q (resource group %q) does not exist", dName, resourceGroup)
         }
+        if resp.StatusCode != http.StatusNotFound && !shouldExist {
+            return fmt.Errorf("Bad: ManagedDisk %q (resource group %q) still exists", dName, resourceGroup)
+        }
 
         *d = resp
 
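For illustration, the two guard clauses the reworked helper now applies can be exercised on their own. A minimal, self-contained Go sketch (checkDiskState and the sample names are hypothetical; only the two conditions and their messages come from the change above):

package main

import (
	"fmt"
	"net/http"
)

// checkDiskState mirrors the shouldExist logic added to
// testCheckAzureRMManagedDiskExists: a 404 is only an error when the disk is
// expected to exist, and any non-404 response is an error when the disk is
// expected to be gone.
func checkDiskState(statusCode int, shouldExist bool, name, resourceGroup string) error {
	if statusCode == http.StatusNotFound && shouldExist {
		return fmt.Errorf("Bad: ManagedDisk %q (resource group %q) does not exist", name, resourceGroup)
	}
	if statusCode != http.StatusNotFound && !shouldExist {
		return fmt.Errorf("Bad: ManagedDisk %q (resource group %q) still exists", name, resourceGroup)
	}
	return nil
}

func main() {
	// Disk expected to exist and the API returned 200: no error.
	fmt.Println(checkDiskState(http.StatusOK, true, "acctestd-1", "acctestRG-1"))
	// Disk expected to be deleted but the API still returns 200: error.
	fmt.Println(checkDiskState(http.StatusOK, false, "acctestd-1", "acctestRG-1"))
}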
@@ -217,7 +220,7 @@ resource "azurerm_managed_disk" "test" {
     resource_group_name = "${azurerm_resource_group.test.name}"
     storage_account_type = "Standard_LRS"
     create_option = "Empty"
-    disk_size_gb = "20"
+    disk_size_gb = "1"
 
     tags {
         environment = "acctest"
@@ -309,7 +312,7 @@ resource "azurerm_managed_disk" "test" {
     resource_group_name = "${azurerm_resource_group.test.name}"
     storage_account_type = "Premium_LRS"
     create_option = "Empty"
-    disk_size_gb = "30"
+    disk_size_gb = "2"
 
     tags {
         environment = "acctest"
@@ -147,27 +147,22 @@ func resourceArmVirtualMachine() *schema.Resource {
             ForceNew: true,
         },
 
-        "managed_disk": {
-            Type:     schema.TypeMap,
-            Optional: true,
-            Elem: &schema.Resource{
-                Schema: map[string]*schema.Schema{
         "managed_disk_id": {
             Type:     schema.TypeString,
             Optional: true,
             ForceNew: true,
             Computed: true,
         },
-        "storage_account_type": {
+
+        "managed_disk_type": {
             Type:     schema.TypeString,
             Optional: true,
             Computed: true,
             ValidateFunc: validation.StringInSlice([]string{
                 string(compute.PremiumLRS),
                 string(compute.StandardLRS),
             }, true),
         },
-                },
-            },
-        },
 
         "image_uri": {
             Type:     schema.TypeString,
@@ -216,27 +211,22 @@ func resourceArmVirtualMachine() *schema.Resource {
             Optional: true,
         },
 
-        "managed_disk": {
-            Type:     schema.TypeMap,
-            Optional: true,
-            Elem: &schema.Resource{
-                Schema: map[string]*schema.Schema{
         "managed_disk_id": {
             Type:     schema.TypeString,
             Optional: true,
             ForceNew: true,
             Computed: true,
         },
-        "storage_account_type": {
+
+        "managed_disk_type": {
             Type:     schema.TypeString,
-            Required: true,
+            Optional: true,
             Computed: true,
             ValidateFunc: validation.StringInSlice([]string{
                 string(compute.PremiumLRS),
                 string(compute.StandardLRS),
             }, true),
         },
-                },
-            },
-        },
 
         "create_option": {
             Type:     schema.TypeString,
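With the nested managed_disk map gone, the disk settings are written directly on storage_os_disk / storage_data_disk. A hypothetical config fragment in the style of the test constants elsewhere in this commit (only the flattened disk blocks are shown; the surrounding virtual machine attributes are omitted and the names and values are illustrative):

package azurerm

// Hypothetical test configuration fragment; not part of this commit.
const testAccFlattenedManagedDiskExample = `
    storage_os_disk {
        name              = "osd-example"
        caching           = "ReadWrite"
        create_option     = "FromImage"
        managed_disk_type = "Standard_LRS"
    }

    storage_data_disk {
        name            = "dtd-example"
        lun             = 0
        disk_size_gb    = "1"
        create_option   = "Attach"
        managed_disk_id = "${azurerm_managed_disk.test.id}"
    }
`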
@@ -708,21 +698,29 @@ func resourceArmVirtualMachineDelete(d *schema.ResourceData, meta interface{}) e
 
     // delete OS Disk if opted in
     if deleteOsDisk := d.Get("delete_os_disk_on_termination").(bool); deleteOsDisk {
-        log.Printf("[INFO] delete_os_disk_on_termination is enabled, deleting")
+        log.Printf("[INFO] delete_os_disk_on_termination is enabled, deleting disk from %s", name)
 
         osDisk, err := expandAzureRmVirtualMachineOsDisk(d)
         if err != nil {
             return fmt.Errorf("Error expanding OS Disk: %s", err)
         }
 
+        if osDisk.Vhd != nil {
             if err = resourceArmVirtualMachineDeleteVhd(*osDisk.Vhd.URI, meta); err != nil {
                 return fmt.Errorf("Error deleting OS Disk VHD: %s", err)
             }
+        } else if osDisk.ManagedDisk != nil {
+            if err = resourceArmVirtualMachineDeleteManagedDisk(*osDisk.ManagedDisk.ID, meta); err != nil {
+                return fmt.Errorf("Error deleting OS Managed Disk: %s", err)
+            }
+        } else {
+            return fmt.Errorf("Unable to locate OS managed disk properties from %s", name)
+        }
     }
 
     // delete Data disks if opted in
     if deleteDataDisks := d.Get("delete_data_disks_on_termination").(bool); deleteDataDisks {
-        log.Printf("[INFO] delete_data_disks_on_termination is enabled, deleting each data disk")
+        log.Printf("[INFO] delete_data_disks_on_termination is enabled, deleting each data disk from %s", name)
 
         disks, err := expandAzureRmVirtualMachineDataDisk(d)
         if err != nil {
@@ -730,8 +728,16 @@ func resourceArmVirtualMachineDelete(d *schema.ResourceData, meta interface{}) e
         }
 
         for _, disk := range disks {
+            if disk.Vhd != nil {
                 if err = resourceArmVirtualMachineDeleteVhd(*disk.Vhd.URI, meta); err != nil {
-                    return fmt.Errorf("Error deleting Data Disk VHD: %s", err)
+                    return fmt.Errorf("Error deleting Data Managed Disk: %s", err)
                 }
+            } else if disk.ManagedDisk != nil {
+                if err = resourceArmVirtualMachineDeleteManagedDisk(*disk.ManagedDisk.ID, meta); err != nil {
+                    return fmt.Errorf("Error deleting Data Managed Disk: %s", err)
+                }
+            } else {
+                return fmt.Errorf("Unable to locate data managed disk properties from %s", name)
+            }
         }
     }
@@ -775,6 +781,24 @@ func resourceArmVirtualMachineDeleteVhd(uri string, meta interface{}) error {
     return nil
 }
 
+func resourceArmVirtualMachineDeleteManagedDisk(managedDiskID string, meta interface{}) error {
+    diskClient := meta.(*ArmClient).diskClient
+
+    id, err := parseAzureResourceID(managedDiskID)
+    if err != nil {
+        return err
+    }
+    resGroup := id.ResourceGroup
+    name := id.Path["disks"]
+
+    _, err = diskClient.Delete(resGroup, name, make(chan struct{}))
+    if err != nil {
+        return fmt.Errorf("Error deleting Managed Disk (%s %s) %s", name, resGroup, err)
+    }
+
+    return nil
+}
+
 func resourceArmVirtualMachinePlanHash(v interface{}) int {
     var buf bytes.Buffer
     m := v.(map[string]interface{})
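The new delete helper relies on parseAzureResourceID to recover the resource group and disk name from the managed disk ID before calling diskClient.Delete. A simplified, self-contained sketch of that ID walk (parseDiskID and the sample ID are illustrative stand-ins, not the provider's actual parser):

package main

import (
	"fmt"
	"strings"
)

// parseDiskID walks the /key/value segments of an ARM resource ID and returns
// the resource group and disk name, the two values the delete helper passes
// to diskClient.Delete. Simplified for illustration only.
func parseDiskID(managedDiskID string) (resGroup, name string, err error) {
	segments := strings.Split(strings.Trim(managedDiskID, "/"), "/")
	if len(segments)%2 != 0 {
		return "", "", fmt.Errorf("ID has an odd number of segments: %q", managedDiskID)
	}
	values := map[string]string{}
	for i := 0; i < len(segments); i += 2 {
		values[segments[i]] = segments[i+1]
	}
	resGroup, name = values["resourceGroups"], values["disks"]
	if resGroup == "" || name == "" {
		return "", "", fmt.Errorf("could not find resource group and disk name in %q", managedDiskID)
	}
	return resGroup, name, nil
}

func main() {
	id := "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/acctestRG-1/providers/Microsoft.Compute/disks/acctestd-1"
	fmt.Println(parseDiskID(id))
}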
@@ -810,15 +834,6 @@ func resourceArmVirtualMachineStorageOsDiskHash(v interface{}) int {
     if m["vhd_uri"] != nil {
         buf.WriteString(fmt.Sprintf("%s-", m["vhd_uri"].(string)))
     }
-    if m["managed_disk"] != nil {
-        managedDisk := m["managed_disk"].(map[string]interface{})
-        if managedDisk["storage_account_type"] != nil {
-            buf.WriteString(fmt.Sprintf("%s-", managedDisk["storage_account_type"].(string)))
-        }
-        if managedDisk["managed_disk_id"] != nil {
-            buf.WriteString(fmt.Sprintf("%s-", managedDisk["managed_disk_id"].(string)))
-        }
-    }
     return hashcode.String(buf.String())
 }
 
@@ -920,7 +935,8 @@ func flattenAzureRmVirtualMachineDataDisk(disks *[]compute.DataDisk) interface{}
             l["vhd_uri"] = *disk.Vhd.URI
         }
         if disk.ManagedDisk != nil {
-            l["managed_disk"] = flattenAzureRmVirtualMachineManagedDisk(disk.ManagedDisk)
+            l["managed_disk_type"] = string(disk.ManagedDisk.StorageAccountType)
+            l["managed_disk_id"] = *disk.ManagedDisk.ID
         }
         l["create_option"] = disk.CreateOption
         l["caching"] = string(disk.Caching)
@@ -1031,7 +1047,8 @@ func flattenAzureRmVirtualMachineOsDisk(disk *compute.OSDisk) []interface{} {
         result["vhd_uri"] = *disk.Vhd.URI
     }
     if disk.ManagedDisk != nil {
-        result["managed_disk"] = flattenAzureRmVirtualMachineManagedDisk(disk.ManagedDisk)
+        result["managed_disk_type"] = string(disk.ManagedDisk.StorageAccountType)
+        result["managed_disk_id"] = *disk.ManagedDisk.ID
     }
     result["create_option"] = disk.CreateOption
     result["caching"] = disk.Caching
@@ -1042,12 +1059,6 @@ func flattenAzureRmVirtualMachineOsDisk(disk *compute.OSDisk) []interface{} {
     return []interface{}{result}
 }
 
-func flattenAzureRmVirtualMachineManagedDisk(params *compute.ManagedDiskParameters) map[string]interface{} {
-    managedDisk := make(map[string]interface{})
-    managedDisk["storage_account_type"] = string(params.StorageAccountType)
-    return managedDisk
-}
-
 func expandAzureRmVirtualMachinePlan(d *schema.ResourceData) (*compute.Plan, error) {
     planConfigs := d.Get("plan").(*schema.Set).List()
 
@@ -1218,22 +1229,22 @@ func expandAzureRmVirtualMachineOsProfileWindowsConfig(d *schema.ResourceData) (
     if v := osProfileConfig["winrm"]; v != nil {
         winRm := v.([]interface{})
         if len(winRm) > 0 {
-            winRmListners := make([]compute.WinRMListener, 0, len(winRm))
+            winRmListeners := make([]compute.WinRMListener, 0, len(winRm))
             for _, winRmConfig := range winRm {
                 config := winRmConfig.(map[string]interface{})
 
                 protocol := config["protocol"].(string)
-                winRmListner := compute.WinRMListener{
+                winRmListener := compute.WinRMListener{
                     Protocol: compute.ProtocolTypes(protocol),
                 }
                 if v := config["certificate_url"].(string); v != "" {
-                    winRmListner.CertificateURL = &v
+                    winRmListener.CertificateURL = &v
                 }
 
-                winRmListners = append(winRmListners, winRmListner)
+                winRmListeners = append(winRmListeners, winRmListener)
             }
             config.WinRM = &compute.WinRMConfiguration{
-                Listeners: &winRmListners,
+                Listeners: &winRmListeners,
             }
         }
     }
@@ -1270,9 +1281,10 @@ func expandAzureRmVirtualMachineDataDisk(d *schema.ResourceData) ([]compute.Data
         config := disk_config.(map[string]interface{})
 
         name := config["name"].(string)
-        vhd := config["vhd_uri"].(string)
-        managedDisk := config["managed_disk"].(map[string]interface{})
         createOption := config["create_option"].(string)
+        vhdURI := config["vhd_uri"].(string)
+        managedDiskType := config["managed_disk_type"].(string)
+        managedDiskID := config["managed_disk_id"].(string)
         lun := int32(config["lun"].(int))
 
         data_disk := compute.DataDisk{
@@ -1280,20 +1292,35 @@ func expandAzureRmVirtualMachineDataDisk(d *schema.ResourceData) ([]compute.Data
             Lun:          &lun,
             CreateOption: compute.DiskCreateOptionTypes(createOption),
         }
-        if vhd != "" && len(managedDisk) != 0 {
-            return nil, fmt.Errorf("[ERROR] Conflict between `vhd_uri` and `managed_disk` (only one or the other can be used)")
-        } else if vhd != "" {
+
+        if vhdURI != "" {
             data_disk.Vhd = &compute.VirtualHardDisk{
-                URI: &vhd,
+                URI: &vhdURI,
             }
-        } else if managedDisk != nil {
-            managedDisk, err := expandAzureRmVirtualMachineManagedDisk(managedDisk, &data_disk.CreateOption)
-            if err != nil {
-                return nil, err
-            }
+        }
+
+        managedDisk := &compute.ManagedDiskParameters{}
+
+        if managedDiskType != "" {
+            managedDisk.StorageAccountType = compute.StorageAccountTypes(managedDiskType)
             data_disk.ManagedDisk = managedDisk
         }
 
+        if managedDiskID != "" {
+            managedDisk.ID = &managedDiskID
+            data_disk.ManagedDisk = managedDisk
+        }
+
+        if vhdURI != "" && managedDiskID != "" {
+            return nil, fmt.Errorf("[ERROR] Conflict between `vhd_uri` and `managed_disk_id` (only one or the other can be used)")
+        }
+        if vhdURI != "" && managedDiskType != "" {
+            return nil, fmt.Errorf("[ERROR] Conflict between `vhd_uri` and `managed_disk_type` (only one or the other can be used)")
+        }
+        if managedDiskID == "" && strings.EqualFold(string(data_disk.CreateOption), string(compute.Attach)) {
+            return nil, fmt.Errorf("[ERROR] Must specify which disk to attach")
+        }
+
         if v := config["caching"].(string); v != "" {
             data_disk.Caching = compute.CachingTypes(v)
         }
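The same three guard clauses are added to both expand functions. Pulled out on their own they read as below (validateDiskSource is a hypothetical standalone helper; the error messages are the ones introduced in this diff, and create_option is compared against the literal "Attach" here rather than compute.Attach):

package main

import (
	"fmt"
	"strings"
)

// validateDiskSource mirrors the guard clauses in the expand functions: an
// unmanaged vhd_uri cannot be combined with managed_disk_id or
// managed_disk_type, and create_option = "Attach" requires an existing
// managed disk ID. Standalone sketch, illustration only.
func validateDiskSource(vhdURI, managedDiskID, managedDiskType, createOption string) error {
	if vhdURI != "" && managedDiskID != "" {
		return fmt.Errorf("[ERROR] Conflict between `vhd_uri` and `managed_disk_id` (only one or the other can be used)")
	}
	if vhdURI != "" && managedDiskType != "" {
		return fmt.Errorf("[ERROR] Conflict between `vhd_uri` and `managed_disk_type` (only one or the other can be used)")
	}
	if managedDiskID == "" && strings.EqualFold(createOption, "Attach") {
		return fmt.Errorf("[ERROR] Must specify which disk to attach")
	}
	return nil
}

func main() {
	// Unmanaged VHD only: valid.
	fmt.Println(validateDiskSource("https://example.blob.core.windows.net/vhds/os.vhd", "", "", "FromImage"))
	// Attach without a managed disk ID: rejected.
	fmt.Println(validateDiskSource("", "", "", "Attach"))
}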
@@ -1369,40 +1396,55 @@ func expandAzureRmVirtualMachineNetworkProfile(d *schema.ResourceData) compute.N
 func expandAzureRmVirtualMachineOsDisk(d *schema.ResourceData) (*compute.OSDisk, error) {
     disks := d.Get("storage_os_disk").(*schema.Set).List()
 
-    disk := disks[0].(map[string]interface{})
+    config := disks[0].(map[string]interface{})
 
-    name := disk["name"].(string)
-    vhdURI := disk["vhd_uri"].(string)
-    managedDisk := disk["managed_disk"].(map[string]interface{})
-    imageURI := disk["image_uri"].(string)
-    createOption := disk["create_option"].(string)
+    name := config["name"].(string)
+    imageURI := config["image_uri"].(string)
+    createOption := config["create_option"].(string)
+    vhdURI := config["vhd_uri"].(string)
+    managedDiskType := config["managed_disk_type"].(string)
+    managedDiskID := config["managed_disk_id"].(string)
 
     osDisk := &compute.OSDisk{
         Name:         &name,
         CreateOption: compute.DiskCreateOptionTypes(createOption),
     }
 
-    if vhdURI != "" && len(managedDisk) != 0 {
-        return nil, fmt.Errorf("[ERROR] Conflict between `vhd_uri` and `managed_disk` (only one or the other can be used)")
-    } else if vhdURI != "" {
+    if vhdURI != "" {
         osDisk.Vhd = &compute.VirtualHardDisk{
             URI: &vhdURI,
         }
-    } else if managedDisk != nil {
-        managedDisk, err := expandAzureRmVirtualMachineManagedDisk(managedDisk, &osDisk.CreateOption)
-        if err != nil {
-            return nil, err
-        }
+    }
+
+    managedDisk := &compute.ManagedDiskParameters{}
+
+    if managedDiskType != "" {
+        managedDisk.StorageAccountType = compute.StorageAccountTypes(managedDiskType)
         osDisk.ManagedDisk = managedDisk
     }
 
-    if v := disk["image_uri"].(string); v != "" {
+    if managedDiskID != "" {
+        managedDisk.ID = &managedDiskID
+        osDisk.ManagedDisk = managedDisk
+    }
+
+    if vhdURI != "" && managedDiskID != "" {
+        return nil, fmt.Errorf("[ERROR] Conflict between `vhd_uri` and `managed_disk_id` (only one or the other can be used)")
+    }
+    if vhdURI != "" && managedDiskType != "" {
+        return nil, fmt.Errorf("[ERROR] Conflict between `vhd_uri` and `managed_disk_type` (only one or the other can be used)")
+    }
+    if managedDiskID == "" && strings.EqualFold(string(osDisk.CreateOption), string(compute.Attach)) {
+        return nil, fmt.Errorf("[ERROR] Must specify which disk to attach")
+    }
+
+    if v := config["image_uri"].(string); v != "" {
         osDisk.Image = &compute.VirtualHardDisk{
             URI: &imageURI,
         }
     }
 
-    if v := disk["os_type"].(string); v != "" {
+    if v := config["os_type"].(string); v != "" {
         if v == "linux" {
             osDisk.OsType = compute.Linux
         } else if v == "windows" {
@@ -1412,11 +1454,11 @@ func expandAzureRmVirtualMachineOsDisk(d *schema.ResourceData) (*compute.OSDisk,
         }
     }
 
-    if v := disk["caching"].(string); v != "" {
+    if v := config["caching"].(string); v != "" {
         osDisk.Caching = compute.CachingTypes(v)
     }
 
-    if v := disk["disk_size_gb"].(int); v != 0 {
+    if v := config["disk_size_gb"].(int); v != 0 {
         diskSize := int32(v)
         osDisk.DiskSizeGB = &diskSize
     }
@@ -1424,21 +1466,6 @@ func expandAzureRmVirtualMachineOsDisk(d *schema.ResourceData) (*compute.OSDisk,
     return osDisk, nil
 }
 
-func expandAzureRmVirtualMachineManagedDisk(managedDisk map[string]interface{}, createOption *compute.DiskCreateOptionTypes) (*compute.ManagedDiskParameters, error) {
-    managedDiskParameters := &compute.ManagedDiskParameters{}
-    if storageAccountType := managedDisk["storage_account_type"]; storageAccountType != nil {
-        managedDiskParameters.StorageAccountType = compute.StorageAccountTypes(storageAccountType.(string))
-    }
-    if managedDisk["managed_disk_id"] != nil {
-        managedDiskId := managedDisk["managed_disk_id"].(string)
-        managedDiskParameters.ID = &managedDiskId
-    }
-    if *createOption == compute.Attach && managedDiskParameters.ID == nil {
-        return nil, fmt.Errorf("[ERROR] A value is required for `managed_disk_id` when `create_option` is %s", compute.Attach)
-    }
-    return managedDiskParameters, nil
-}
-
 func findStorageAccountResourceGroup(meta interface{}, storageAccountName string) (string, error) {
     client := meta.(*ArmClient).resourceFindClient
     filter := fmt.Sprintf("name eq '%s' and resourceType eq 'Microsoft.Storage/storageAccounts'", storageAccountName)
File diff suppressed because it is too large