package google

import (
	"fmt"
	"log"
	"regexp"
	"strings"

	"github.com/hashicorp/terraform/helper/schema"
	"google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)
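// computeDiskUserRegexString matches the self link of an instance that uses
// this disk, capturing the project, zone, and instance name. The leading API
// base URL prefix is optional.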
const (
	computeDiskUserRegexString = "^(?:https://www.googleapis.com/compute/v1/projects/)?([-_a-zA-Z0-9]*)/zones/([-_a-zA-Z0-9]*)/instances/([-_a-zA-Z0-9]*)$"
)

var (
	computeDiskUserRegex = regexp.MustCompile(computeDiskUserRegexString)
)
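// resourceComputeDisk defines the schema and CRUD operations for the
// google_compute_disk resource.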
func resourceComputeDisk() *schema.Resource {
	return &schema.Resource{
		Create: resourceComputeDiskCreate,
		Read:   resourceComputeDiskRead,
		Update: resourceComputeDiskUpdate,
		Delete: resourceComputeDiskDelete,
		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},

		Schema: map[string]*schema.Schema{
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"zone": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"disk_encryption_key_raw": &schema.Schema{
				Type:      schema.TypeString,
				Optional:  true,
				ForceNew:  true,
				Sensitive: true,
			},

			"disk_encryption_key_sha256": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"image": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			"project": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			"size": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
			},

			"self_link": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"snapshot": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			"type": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			"users": &schema.Schema{
				Type:     schema.TypeList,
				Computed: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
			},
		},
	}
}
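// resourceComputeDiskCreate creates the disk, resolving the optional image,
// type, snapshot, and raw encryption key, then waits for the zonal insert
// operation to complete before reading the state back.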
func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	// Get the zone
	log.Printf("[DEBUG] Loading zone: %s", d.Get("zone").(string))
	zone, err := config.clientCompute.Zones.Get(
		project, d.Get("zone").(string)).Do()
	if err != nil {
		return fmt.Errorf(
			"Error loading zone '%s': %s", d.Get("zone").(string), err)
	}

	// Build the disk parameter
	disk := &compute.Disk{
		Name:   d.Get("name").(string),
		SizeGb: int64(d.Get("size").(int)),
	}

	// If we were given a source image, load that.
	if v, ok := d.GetOk("image"); ok {
		log.Printf("[DEBUG] Resolving image name: %s", v.(string))
		imageUrl, err := resolveImage(config, v.(string))
		if err != nil {
			return fmt.Errorf(
				"Error resolving image name '%s': %s",
				v.(string), err)
		}

		disk.SourceImage = imageUrl
		log.Printf("[DEBUG] Image name resolved to: %s", imageUrl)
	}

	if v, ok := d.GetOk("type"); ok {
		log.Printf("[DEBUG] Loading disk type: %s", v.(string))
		diskType, err := readDiskType(config, zone, v.(string))
		if err != nil {
			return fmt.Errorf(
				"Error loading disk type '%s': %s",
				v.(string), err)
		}

		disk.Type = diskType.SelfLink
	}

	if v, ok := d.GetOk("snapshot"); ok {
		snapshotName := v.(string)
		match, _ := regexp.MatchString("^https://www.googleapis.com/compute", snapshotName)
		if match {
			disk.SourceSnapshot = snapshotName
		} else {
			log.Printf("[DEBUG] Loading snapshot: %s", snapshotName)
			snapshotData, err := config.clientCompute.Snapshots.Get(
				project, snapshotName).Do()

			if err != nil {
				return fmt.Errorf(
					"Error loading snapshot '%s': %s",
					snapshotName, err)
			}
			disk.SourceSnapshot = snapshotData.SelfLink
		}
	}

	if v, ok := d.GetOk("disk_encryption_key_raw"); ok {
		disk.DiskEncryptionKey = &compute.CustomerEncryptionKey{}
		disk.DiskEncryptionKey.RawKey = v.(string)
	}

	op, err := config.clientCompute.Disks.Insert(
		project, d.Get("zone").(string), disk).Do()
	if err != nil {
		return fmt.Errorf("Error creating disk: %s", err)
	}

	// It probably maybe worked, so store the ID now
	d.SetId(disk.Name)

	err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Creating Disk")
	if err != nil {
		return err
	}
	return resourceComputeDiskRead(d, meta)
}
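// resourceComputeDiskUpdate handles in-place updates; currently only a change
// to "size" is supported, which resizes the disk.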
func resourceComputeDiskUpdate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	if d.HasChange("size") {
		rb := &compute.DisksResizeRequest{
			SizeGb: int64(d.Get("size").(int)),
		}
		_, err := config.clientCompute.Disks.Resize(
			project, d.Get("zone").(string), d.Id(), rb).Do()
		if err != nil {
			return fmt.Errorf("Error resizing disk: %s", err)
		}
	}

	return resourceComputeDiskRead(d, meta)
}
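// resourceComputeDiskRead refreshes the state from the API. When the zone is
// not set (for example, after an import), it falls back to searching every
// zone in the configured region for a disk with the resource's ID.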
func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	region, err := getRegion(d, config)
	if err != nil {
		return err
	}

	getDisk := func(zone string) (interface{}, error) {
		return config.clientCompute.Disks.Get(project, zone, d.Id()).Do()
	}

	var disk *compute.Disk
	if zone, ok := d.GetOk("zone"); ok {
		disk, err = config.clientCompute.Disks.Get(
			project, zone.(string), d.Id()).Do()
		if err != nil {
			return handleNotFoundError(err, d, fmt.Sprintf("Disk %q", d.Get("name").(string)))
		}
	} else {
		// If the resource was imported, the only info we have is the ID. Try to find the resource
		// by searching in the region of the project.
		var resource interface{}
		resource, err = getZonalResourceFromRegion(getDisk, region, config.clientCompute, project)

		if err != nil {
			return err
		}

		disk = resource.(*compute.Disk)
	}

	zoneUrlParts := strings.Split(disk.Zone, "/")
	typeUrlParts := strings.Split(disk.Type, "/")
	d.Set("name", disk.Name)
	d.Set("self_link", disk.SelfLink)
	d.Set("type", typeUrlParts[len(typeUrlParts)-1])
	d.Set("zone", zoneUrlParts[len(zoneUrlParts)-1])
	d.Set("size", disk.SizeGb)
	d.Set("users", disk.Users)
	if disk.DiskEncryptionKey != nil && disk.DiskEncryptionKey.Sha256 != "" {
		d.Set("disk_encryption_key_sha256", disk.DiskEncryptionKey.Sha256)
	}
	if disk.SourceImage != "" {
		imageUrlParts := strings.Split(disk.SourceImage, "/")
		d.Set("image", imageUrlParts[len(imageUrlParts)-1])
	}
	d.Set("snapshot", disk.SourceSnapshot)

	return nil
}
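// resourceComputeDiskDelete detaches the disk from any instances that still
// use it, then deletes the disk and waits for the zonal operation to finish.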
func resourceComputeDiskDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	// If the disk is attached to instances, it must be detached before it can be deleted.
	if instances, ok := d.Get("users").([]interface{}); ok {
		type detachArgs struct{ project, zone, instance, deviceName string }
		var detachCalls []detachArgs
		self := d.Get("self_link").(string)
		for _, instance := range instances {
			if !computeDiskUserRegex.MatchString(instance.(string)) {
				return fmt.Errorf("Unknown user %q of disk %q", instance, self)
			}
			matches := computeDiskUserRegex.FindStringSubmatch(instance.(string))
			instanceProject := matches[1]
			instanceZone := matches[2]
			instanceName := matches[3]
			i, err := config.clientCompute.Instances.Get(instanceProject, instanceZone, instanceName).Do()
			if err != nil {
				if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
					log.Printf("[WARN] instance %q not found, not bothering to detach disks", instance.(string))
					continue
				}
				return fmt.Errorf("Error retrieving instance %s: %s", instance.(string), err.Error())
			}
			for _, disk := range i.Disks {
				if disk.Source == self {
					detachCalls = append(detachCalls, detachArgs{
						project:    project,
						zone:       i.Zone,
						instance:   i.Name,
						deviceName: disk.DeviceName,
					})
				}
			}
		}
		for _, call := range detachCalls {
			op, err := config.clientCompute.Instances.DetachDisk(call.project, call.zone, call.instance, call.deviceName).Do()
			if err != nil {
				return fmt.Errorf("Error detaching disk %s from instance %s/%s/%s: %s", call.deviceName, call.project,
					call.zone, call.instance, err.Error())
			}
			err = computeOperationWaitZone(config, op, call.project, call.zone,
				fmt.Sprintf("Detaching disk from %s/%s/%s", call.project, call.zone, call.instance))
			if err != nil {
				return err
			}
		}
	}

	// Delete the disk
	op, err := config.clientCompute.Disks.Delete(
		project, d.Get("zone").(string), d.Id()).Do()
	if err != nil {
		if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
			log.Printf("[WARN] Removing Disk %q because it's gone", d.Get("name").(string))
			// The resource doesn't exist anymore
			d.SetId("")
			return nil
		}
		return fmt.Errorf("Error deleting disk: %s", err)
	}

	zone := d.Get("zone").(string)
	err = computeOperationWaitZone(config, op, project, zone, "Deleting Disk")
	if err != nil {
		return err
	}

	d.SetId("")
	return nil
}