package azure

import (
	"fmt"
	"log"
	"time"

	"github.com/Azure/azure-sdk-for-go/management"
	"github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk"
	"github.com/hashicorp/terraform/helper/schema"
)
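
// dataDiskBlobStorageURL is the URL template used to build the media link of
// a data disk VHD blob from the storage service name and the disk name.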
const dataDiskBlobStorageURL = "http://%s.blob.core.windows.net/disks/%s.vhd"
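
// resourceAzureDataDisk returns the schema and CRUD functions for an Azure
// data disk that is attached to an existing virtual machine.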
func resourceAzureDataDisk() *schema.Resource {
	return &schema.Resource{
		Create: resourceAzureDataDiskCreate,
		Read:   resourceAzureDataDiskRead,
		Update: resourceAzureDataDiskUpdate,
		Delete: resourceAzureDataDiskDelete,

		Schema: map[string]*schema.Schema{
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},

			"label": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},

			"lun": &schema.Schema{
				Type:     schema.TypeInt,
				Required: true,
			},

			"size": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
			},

			"caching": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Default:  "None",
			},

			"storage_service_name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			"media_link": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},

			"source_media_link": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			"virtual_machine": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
		},
	}
}
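
// resourceAzureDataDiskCreate adds a new data disk to the configured virtual
// machine and waits for the operation to complete before reading it back.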
func resourceAzureDataDiskCreate(d *schema.ResourceData, meta interface{}) error {
	mc := meta.(*Client).mgmtClient

	if err := verifyDataDiskParameters(d); err != nil {
		return err
	}

	lun := d.Get("lun").(int)
	vm := d.Get("virtual_machine").(string)

	label := d.Get("label").(string)
	if label == "" {
		label = fmt.Sprintf("%s-%d", vm, lun)
	}

	p := virtualmachinedisk.CreateDataDiskParameters{
		DiskLabel:           label,
		Lun:                 lun,
		LogicalDiskSizeInGB: d.Get("size").(int),
		HostCaching:         hostCaching(d),
		MediaLink:           mediaLink(d),
		SourceMediaLink:     d.Get("source_media_link").(string),
	}

	if name, ok := d.GetOk("name"); ok {
		p.DiskName = name.(string)
	}

	log.Printf("[DEBUG] Adding data disk %d to instance: %s", lun, vm)
	req, err := virtualmachinedisk.NewClient(mc).AddDataDisk(vm, vm, vm, p)
	if err != nil {
		return fmt.Errorf("Error adding data disk %d to instance %s: %s", lun, vm, err)
	}

	// Wait until the data disk is added
	if err := mc.WaitForOperation(req, nil); err != nil {
		return fmt.Errorf(
			"Error waiting for data disk %d to be added to instance %s: %s", lun, vm, err)
	}

	log.Printf("[DEBUG] Retrieving data disk %d from instance %s", lun, vm)
	disk, err := virtualmachinedisk.NewClient(mc).GetDataDisk(vm, vm, vm, lun)
	if err != nil {
		return fmt.Errorf("Error retrieving data disk %d from instance %s: %s", lun, vm, err)
	}

	d.SetId(disk.DiskName)

	return resourceAzureDataDiskRead(d, meta)
}
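
// resourceAzureDataDiskRead refreshes the state of the data disk and of the
// underlying disk resource, clearing the ID if the disk no longer exists.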
func resourceAzureDataDiskRead(d *schema.ResourceData, meta interface{}) error {
	mc := meta.(*Client).mgmtClient

	lun := d.Get("lun").(int)
	vm := d.Get("virtual_machine").(string)

	log.Printf("[DEBUG] Retrieving data disk: %s", d.Id())
	datadisk, err := virtualmachinedisk.NewClient(mc).GetDataDisk(vm, vm, vm, lun)
	if err != nil {
		if management.IsResourceNotFoundError(err) {
			d.SetId("")
			return nil
		}
		return fmt.Errorf("Error retrieving data disk %s: %s", d.Id(), err)
	}

	d.Set("name", datadisk.DiskName)
	d.Set("label", datadisk.DiskLabel)
	d.Set("lun", datadisk.Lun)
	d.Set("size", datadisk.LogicalDiskSizeInGB)
	d.Set("caching", datadisk.HostCaching)
	d.Set("media_link", datadisk.MediaLink)

	log.Printf("[DEBUG] Retrieving disk: %s", d.Id())
	disk, err := virtualmachinedisk.NewClient(mc).GetDisk(d.Id())
	if err != nil {
		return fmt.Errorf("Error retrieving disk %s: %s", d.Id(), err)
	}

	d.Set("virtual_machine", disk.AttachedTo.RoleName)

	return nil
}
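
// resourceAzureDataDiskUpdate handles changes to the data disk. A change to
// the lun, size or virtual_machine detaches the disk, optionally resizes it
// and attaches it again; a change to only the caching setting is applied in
// place.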
func resourceAzureDataDiskUpdate(d *schema.ResourceData, meta interface{}) error {
	mc := meta.(*Client).mgmtClient
	diskClient := virtualmachinedisk.NewClient(mc)

	lun := d.Get("lun").(int)
	vm := d.Get("virtual_machine").(string)

	if d.HasChange("lun") || d.HasChange("size") || d.HasChange("virtual_machine") {
		olun, _ := d.GetChange("lun")
		ovm, _ := d.GetChange("virtual_machine")

		log.Printf("[DEBUG] Detaching data disk: %s", d.Id())
		req, err := diskClient.
			DeleteDataDisk(ovm.(string), ovm.(string), ovm.(string), olun.(int), false)
		if err != nil {
			return fmt.Errorf("Error detaching data disk %s: %s", d.Id(), err)
		}

		// Wait until the data disk is detached
		if err := mc.WaitForOperation(req, nil); err != nil {
			return fmt.Errorf(
				"Error waiting for data disk %s to be detached: %s", d.Id(), err)
		}

		log.Printf("[DEBUG] Verifying data disk %s is properly detached...", d.Id())
		for i := 0; i < 6; i++ {
			disk, err := diskClient.GetDisk(d.Id())
			if err != nil {
				return fmt.Errorf("Error retrieving disk %s: %s", d.Id(), err)
			}

			// Check if the disk is really detached
			if disk.AttachedTo.RoleName == "" {
				break
			}

			// If not, wait 30 seconds and try it again...
			time.Sleep(30 * time.Second)
		}

		if d.HasChange("size") {
			p := virtualmachinedisk.UpdateDiskParameters{
				Name:            d.Id(),
				Label:           d.Get("label").(string),
				ResizedSizeInGB: d.Get("size").(int),
			}

			log.Printf("[DEBUG] Updating disk: %s", d.Id())
			req, err := diskClient.UpdateDisk(d.Id(), p)
			if err != nil {
				return fmt.Errorf("Error updating disk %s: %s", d.Id(), err)
			}

			// Wait until the disk is updated
			if err := mc.WaitForOperation(req, nil); err != nil {
				return fmt.Errorf(
					"Error waiting for disk %s to be updated: %s", d.Id(), err)
			}
		}

		p := virtualmachinedisk.CreateDataDiskParameters{
			DiskName:    d.Id(),
			Lun:         lun,
			HostCaching: hostCaching(d),
			MediaLink:   mediaLink(d),
		}

		log.Printf("[DEBUG] Attaching data disk: %s", d.Id())
		req, err = diskClient.AddDataDisk(vm, vm, vm, p)
		if err != nil {
			return fmt.Errorf("Error attaching data disk %s to instance %s: %s", d.Id(), vm, err)
		}

		// Wait until the data disk is attached
		if err := mc.WaitForOperation(req, nil); err != nil {
			return fmt.Errorf(
				"Error waiting for data disk %s to be attached to instance %s: %s", d.Id(), vm, err)
		}

		// Make sure we return here since all possible changes are
		// already updated if we reach this point
		return nil
	}

	if d.HasChange("caching") {
		p := virtualmachinedisk.UpdateDataDiskParameters{
			DiskName:    d.Id(),
			Lun:         lun,
			HostCaching: hostCaching(d),
			MediaLink:   mediaLink(d),
		}

		log.Printf("[DEBUG] Updating data disk: %s", d.Id())
		req, err := diskClient.UpdateDataDisk(vm, vm, vm, lun, p)
		if err != nil {
			return fmt.Errorf("Error updating data disk %s: %s", d.Id(), err)
		}

		// Wait until the data disk is updated
		if err := mc.WaitForOperation(req, nil); err != nil {
			return fmt.Errorf(
				"Error waiting for data disk %s to be updated: %s", d.Id(), err)
		}
	}

	return resourceAzureDataDiskRead(d, meta)
}
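
// resourceAzureDataDiskDelete detaches the data disk from its virtual machine
// and waits for the operation to finish; the removeBlob flag controls whether
// the backing blob is deleted as well.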
func resourceAzureDataDiskDelete(d *schema.ResourceData, meta interface{}) error {
	mc := meta.(*Client).mgmtClient

	lun := d.Get("lun").(int)
	vm := d.Get("virtual_machine").(string)

	// If a name was not supplied, it means we created a new empty disk and we now want to
	// delete that disk again. Otherwise we only want to detach the disk and keep the blob.
	_, removeBlob := d.GetOk("name")

	log.Printf("[DEBUG] Detaching data disk %s with removeBlob = %t", d.Id(), removeBlob)
	req, err := virtualmachinedisk.NewClient(mc).DeleteDataDisk(vm, vm, vm, lun, removeBlob)
	if err != nil {
		return fmt.Errorf(
			"Error detaching data disk %s with removeBlob = %t: %s", d.Id(), removeBlob, err)
	}

	// Wait until the data disk is detached and optionally deleted
	if err := mc.WaitForOperation(req, nil); err != nil {
		return fmt.Errorf(
			"Error waiting for data disk %s to be detached with removeBlob = %t: %s",
			d.Id(), removeBlob, err)
	}

	d.SetId("")

	return nil
}
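
// hostCaching maps the configured caching string onto the corresponding
// virtualmachinedisk.HostCachingType constant, defaulting to None.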
func hostCaching(d *schema.ResourceData) virtualmachinedisk.HostCachingType {
	switch d.Get("caching").(string) {
	case "ReadOnly":
		return virtualmachinedisk.HostCachingTypeReadOnly
	case "ReadWrite":
		return virtualmachinedisk.HostCachingTypeReadWrite
	default:
		return virtualmachinedisk.HostCachingTypeNone
	}
}
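
// mediaLink returns the media link to use for the data disk. It prefers an
// explicitly configured media_link and otherwise builds one from the storage
// service name and the disk name (or "<virtual_machine>-<lun>" if no name is
// set).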
func mediaLink(d *schema.ResourceData) string {
	mediaLink, ok := d.GetOk("media_link")
	if ok {
		return mediaLink.(string)
	}

	name, ok := d.GetOk("name")
	if !ok {
		name = fmt.Sprintf("%s-%d", d.Get("virtual_machine").(string), d.Get("lun").(int))
	}

	return fmt.Sprintf(dataDiskBlobStorageURL, d.Get("storage_service_name").(string), name.(string))
}
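
// verifyDataDiskParameters validates the caching setting and makes sure that
// either media_link or storage_service_name is configured.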
func verifyDataDiskParameters(d *schema.ResourceData) error {
	caching := d.Get("caching").(string)
	if caching != "None" && caching != "ReadOnly" && caching != "ReadWrite" {
		return fmt.Errorf(
			"Invalid caching type %s! Valid options are 'None', 'ReadOnly' and 'ReadWrite'.", caching)
	}

	if _, ok := d.GetOk("media_link"); !ok {
		if _, ok := d.GetOk("storage_service_name"); !ok {
			return fmt.Errorf("If not supplying 'media_link', you must supply 'storage_service_name'.")
		}
	}

	return nil
}