provider/google: add attached_disk field to google_compute_instance (#13443)
This commit is contained in:
parent
9c42866887
commit
e7c3575499
|
@ -28,7 +28,7 @@ func resourceComputeInstance() *schema.Resource {
|
|||
Schema: map[string]*schema.Schema{
|
||||
"disk": &schema.Schema{
|
||||
Type: schema.TypeList,
|
||||
Required: true,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
|
@ -91,6 +91,40 @@ func resourceComputeInstance() *schema.Resource {
|
|||
},
|
||||
},
|
||||
|
||||
// Preferred way of adding persistent disks to an instance.
|
||||
// Use this instead of `disk` when possible.
|
||||
"attached_disk": &schema.Schema{
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
ForceNew: true, // TODO(danawillow): Remove this, support attaching/detaching
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"source": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"device_name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"disk_encryption_key_raw": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Sensitive: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"disk_encryption_key_sha256": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
"machine_type": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
|
@ -371,7 +405,11 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
|
|||
|
||||
// Build up the list of disks
|
||||
disksCount := d.Get("disk.#").(int)
|
||||
disks := make([]*compute.AttachedDisk, 0, disksCount)
|
||||
attachedDisksCount := d.Get("attached_disk.#").(int)
|
||||
if disksCount+attachedDisksCount == 0 {
|
||||
return fmt.Errorf("At least one disk or attached_disk must be set")
|
||||
}
|
||||
disks := make([]*compute.AttachedDisk, 0, disksCount+attachedDisksCount)
|
||||
for i := 0; i < disksCount; i++ {
|
||||
prefix := fmt.Sprintf("disk.%d", i)
|
||||
|
||||
|
@ -457,6 +495,28 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
|
|||
disks = append(disks, &disk)
|
||||
}
|
||||
|
||||
for i := 0; i < attachedDisksCount; i++ {
|
||||
prefix := fmt.Sprintf("attached_disk.%d", i)
|
||||
disk := compute.AttachedDisk{
|
||||
Source: d.Get(prefix + ".source").(string),
|
||||
AutoDelete: false, // Don't allow autodelete; let terraform handle disk deletion
|
||||
}
|
||||
|
||||
disk.Boot = i == 0 && disksCount == 0 // TODO(danawillow): This is super hacky, let's just add a boot field.
|
||||
|
||||
if v, ok := d.GetOk(prefix + ".device_name"); ok {
|
||||
disk.DeviceName = v.(string)
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk(prefix + ".disk_encryption_key_raw"); ok {
|
||||
disk.DiskEncryptionKey = &compute.CustomerEncryptionKey{
|
||||
RawKey: v.(string),
|
||||
}
|
||||
}
|
||||
|
||||
disks = append(disks, &disk)
|
||||
}
|
||||
|
||||
networksCount := d.Get("network.#").(int)
|
||||
networkInterfacesCount := d.Get("network_interface.#").(int)
|
||||
|
||||
|
@ -791,24 +851,54 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error
|
|||
d.Set("tags_fingerprint", instance.Tags.Fingerprint)
|
||||
}
|
||||
|
||||
disks := make([]map[string]interface{}, 0, 1)
|
||||
for i, disk := range instance.Disks {
|
||||
di := map[string]interface{}{
|
||||
"disk": d.Get(fmt.Sprintf("disk.%d.disk", i)),
|
||||
"image": d.Get(fmt.Sprintf("disk.%d.image", i)),
|
||||
"type": d.Get(fmt.Sprintf("disk.%d.type", i)),
|
||||
"scratch": d.Get(fmt.Sprintf("disk.%d.scratch", i)),
|
||||
"auto_delete": d.Get(fmt.Sprintf("disk.%d.auto_delete", i)),
|
||||
"size": d.Get(fmt.Sprintf("disk.%d.size", i)),
|
||||
"device_name": d.Get(fmt.Sprintf("disk.%d.device_name", i)),
|
||||
"disk_encryption_key_raw": d.Get(fmt.Sprintf("disk.%d.disk_encryption_key_raw", i)),
|
||||
disksCount := d.Get("disk.#").(int)
|
||||
attachedDisksCount := d.Get("attached_disk.#").(int)
|
||||
disks := make([]map[string]interface{}, 0, disksCount)
|
||||
attachedDisks := make([]map[string]interface{}, 0, attachedDisksCount)
|
||||
|
||||
if expectedDisks := disksCount + attachedDisksCount; len(instance.Disks) != expectedDisks {
|
||||
return fmt.Errorf("Expected %d disks, API returned %d", expectedDisks, len(instance.Disks))
|
||||
}
|
||||
|
||||
attachedDiskSources := make(map[string]struct{}, attachedDisksCount)
|
||||
for i := 0; i < attachedDisksCount; i++ {
|
||||
attachedDiskSources[d.Get(fmt.Sprintf("attached_disk.%d.source", i)).(string)] = struct{}{}
|
||||
}
|
||||
|
||||
dIndex := 0
|
||||
adIndex := 0
|
||||
for _, disk := range instance.Disks {
|
||||
if _, ok := attachedDiskSources[disk.Source]; !ok {
|
||||
di := map[string]interface{}{
|
||||
"disk": d.Get(fmt.Sprintf("disk.%d.disk", dIndex)),
|
||||
"image": d.Get(fmt.Sprintf("disk.%d.image", dIndex)),
|
||||
"type": d.Get(fmt.Sprintf("disk.%d.type", dIndex)),
|
||||
"scratch": d.Get(fmt.Sprintf("disk.%d.scratch", dIndex)),
|
||||
"auto_delete": d.Get(fmt.Sprintf("disk.%d.auto_delete", dIndex)),
|
||||
"size": d.Get(fmt.Sprintf("disk.%d.size", dIndex)),
|
||||
"device_name": d.Get(fmt.Sprintf("disk.%d.device_name", dIndex)),
|
||||
"disk_encryption_key_raw": d.Get(fmt.Sprintf("disk.%d.disk_encryption_key_raw", dIndex)),
|
||||
}
|
||||
if disk.DiskEncryptionKey != nil && disk.DiskEncryptionKey.Sha256 != "" {
|
||||
di["disk_encryption_key_sha256"] = disk.DiskEncryptionKey.Sha256
|
||||
}
|
||||
disks = append(disks, di)
|
||||
dIndex++
|
||||
} else {
|
||||
di := map[string]interface{}{
|
||||
"source": disk.Source,
|
||||
"device_name": disk.DeviceName,
|
||||
"disk_encryption_key_raw": d.Get(fmt.Sprintf("attached_disk.%d.disk_encryption_key_raw", adIndex)),
|
||||
}
|
||||
if disk.DiskEncryptionKey != nil && disk.DiskEncryptionKey.Sha256 != "" {
|
||||
di["disk_encryption_key_sha256"] = disk.DiskEncryptionKey.Sha256
|
||||
}
|
||||
attachedDisks = append(attachedDisks, di)
|
||||
adIndex++
|
||||
}
|
||||
if disk.DiskEncryptionKey != nil && disk.DiskEncryptionKey.Sha256 != "" {
|
||||
di["disk_encryption_key_sha256"] = disk.DiskEncryptionKey.Sha256
|
||||
}
|
||||
disks = append(disks, di)
|
||||
}
|
||||
d.Set("disk", disks)
|
||||
d.Set("attached_disk", attachedDisks)
|
||||
|
||||
d.Set("self_link", instance.SelfLink)
|
||||
d.SetId(instance.Name)
|
||||
|
|
|
@ -244,6 +244,44 @@ func TestAccComputeInstance_diskEncryption(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestAccComputeInstance_attachedDisk(t *testing.T) {
|
||||
var instance compute.Instance
|
||||
var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
|
||||
var diskName = fmt.Sprintf("instance-testd-%s", acctest.RandString(10))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeInstanceDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeInstance_attachedDisk(diskName, instanceName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeInstanceExists(
|
||||
"google_compute_instance.foobar", &instance),
|
||||
testAccCheckComputeInstanceDisk(&instance, diskName, false, true),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeInstance_noDisk(t *testing.T) {
|
||||
var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeInstanceDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeInstance_noDisk(instanceName),
|
||||
ExpectError: regexp.MustCompile("At least one disk or attached_disk must be set"),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeInstance_local_ssd(t *testing.T) {
|
||||
var instance compute.Instance
|
||||
var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
|
||||
|
@ -1121,6 +1159,51 @@ func testAccComputeInstance_disks_encryption(disk, instance string) string {
|
|||
}`, disk, instance)
|
||||
}
|
||||
|
||||
// testAccComputeInstance_attachedDisk renders a test config containing a
// standalone google_compute_disk plus a google_compute_instance that
// references it through the attached_disk block. disk and instance are the
// resource names substituted into the template, in that order.
func testAccComputeInstance_attachedDisk(disk, instance string) string {
	const tmpl = `
resource "google_compute_disk" "foobar" {
	name = "%s"
	size = 10
	type = "pd-ssd"
	zone = "us-central1-a"
}

resource "google_compute_instance" "foobar" {
	name = "%s"
	machine_type = "n1-standard-1"
	zone = "us-central1-a"

	attached_disk {
		source = "${google_compute_disk.foobar.self_link}"
	}

	network_interface {
		network = "default"
	}

	metadata {
		foo = "bar"
	}
}`
	return fmt.Sprintf(tmpl, disk, instance)
}
|
||||
|
||||
// testAccComputeInstance_noDisk renders a deliberately invalid test config:
// an instance with no disk and no attached_disk block, used to exercise the
// "at least one disk" validation error. instance is the resource name
// substituted into the template.
func testAccComputeInstance_noDisk(instance string) string {
	const tmpl = `
resource "google_compute_instance" "foobar" {
	name = "%s"
	machine_type = "n1-standard-1"
	zone = "us-central1-a"

	network_interface {
		network = "default"
	}

	metadata {
		foo = "bar"
	}
}`
	return fmt.Sprintf(tmpl, instance)
}
|
||||
|
||||
func testAccComputeInstance_local_ssd(instance string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "google_compute_instance" "local-ssd" {
|
||||
|
|
Loading…
Reference in New Issue