Add InstanceGroupManager to GCE.
parent 3339593292
commit 07b44a816a
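For reference, a minimal configuration exercising the new resource, taken from the acceptance-test fixtures added later in this commit (the referenced instance template and target pool are also defined in those fixtures), looks like this:

resource "google_replicapool_instance_group_manager" "foobar" {
    description = "Terraform test instance group manager"
    name = "terraform-test"
    instance_template = "${google_compute_instance_template.foobar.self_link}"
    target_pools = ["${google_compute_target_pool.foobar.self_link}"]
    base_instance_name = "foobar"
    zone = "us-central1-a"
    size = 2
}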
@@ -8,13 +8,13 @@ import (
    "os"

    "code.google.com/p/google-api-go-client/compute/v1"
+   "code.google.com/p/google-api-go-client/replicapool/v1beta2"

    "golang.org/x/oauth2"
    "golang.org/x/oauth2/google"
    "golang.org/x/oauth2/jwt"
)

// Config is the configuration structure used to instantiate the Google
// provider.
type Config struct {

@@ -22,7 +22,8 @@ type Config struct {
    Project string
    Region  string

-   clientCompute *compute.Service
+   clientCompute     *compute.Service
+   clientReplicaPool *replicapool.Service
}

func (c *Config) loadAndValidate() error {

@@ -89,6 +90,11 @@ func (c *Config) loadAndValidate() error {
        return err
    }

+   c.clientReplicaPool, err = replicapool.New(client)
+   if err != nil {
+       return err
+   }
+
    return nil
}

@@ -5,6 +5,7 @@ import (
    "fmt"

    "code.google.com/p/google-api-go-client/compute/v1"
+   "code.google.com/p/google-api-go-client/replicapool/v1beta2"
    "github.com/hashicorp/terraform/helper/resource"
)

@@ -77,3 +78,50 @@ func (e OperationError) Error() string {

    return buf.String()
}

// Replicapool Operations
type ReplicaPoolOperationWaiter struct {
    Service *replicapool.Service
    Op      *replicapool.Operation
    Project string
    Region  string
    Zone    string
}

func (w *ReplicaPoolOperationWaiter) RefreshFunc() resource.StateRefreshFunc {
    return func() (interface{}, string, error) {
        var op *replicapool.Operation
        var err error

        op, err = w.Service.ZoneOperations.Get(
            w.Project, w.Zone, w.Op.Name).Do()

        if err != nil {
            return nil, "", err
        }

        return op, op.Status, nil
    }
}

func (w *ReplicaPoolOperationWaiter) Conf() *resource.StateChangeConf {
    return &resource.StateChangeConf{
        Pending: []string{"PENDING", "RUNNING"},
        Target:  "DONE",
        Refresh: w.RefreshFunc(),
    }
}

// ReplicaPoolOperationError wraps replicapool.OperationError and implements the
// error interface so it can be returned.
type ReplicaPoolOperationError replicapool.OperationError

func (e ReplicaPoolOperationError) Error() string {
    var buf bytes.Buffer

    for _, err := range e.Errors {
        buf.WriteString(err.Message + "\n")
    }

    return buf.String()
}

@@ -29,16 +29,17 @@ func Provider() terraform.ResourceProvider {
        },

        ResourcesMap: map[string]*schema.Resource{
-           "google_compute_address":           resourceComputeAddress(),
-           "google_compute_disk":              resourceComputeDisk(),
-           "google_compute_firewall":          resourceComputeFirewall(),
-           "google_compute_forwarding_rule":   resourceComputeForwardingRule(),
-           "google_compute_http_health_check": resourceComputeHttpHealthCheck(),
-           "google_compute_instance":          resourceComputeInstance(),
-           "google_compute_instance_template": resourceComputeInstanceTemplate(),
-           "google_compute_network":           resourceComputeNetwork(),
-           "google_compute_route":             resourceComputeRoute(),
-           "google_compute_target_pool":       resourceComputeTargetPool(),
+           "google_compute_address":                    resourceComputeAddress(),
+           "google_compute_disk":                       resourceComputeDisk(),
+           "google_compute_firewall":                   resourceComputeFirewall(),
+           "google_compute_forwarding_rule":            resourceComputeForwardingRule(),
+           "google_compute_http_health_check":          resourceComputeHttpHealthCheck(),
+           "google_compute_instance":                   resourceComputeInstance(),
+           "google_compute_instance_template":          resourceComputeInstanceTemplate(),
+           "google_compute_network":                    resourceComputeNetwork(),
+           "google_compute_route":                      resourceComputeRoute(),
+           "google_compute_target_pool":                resourceComputeTargetPool(),
+           "google_replicapool_instance_group_manager": resourceReplicaPoolInstanceGroupManager(),
        },

        ConfigureFunc: providerConfigure,

@@ -0,0 +1,319 @@
package google

import (
    "fmt"
    "log"
    "time"

    "code.google.com/p/google-api-go-client/googleapi"
    "code.google.com/p/google-api-go-client/replicapool/v1beta2"

    "github.com/hashicorp/terraform/helper/hashcode"
    "github.com/hashicorp/terraform/helper/schema"
)

func resourceReplicaPoolInstanceGroupManager() *schema.Resource {
    return &schema.Resource{
        Create: resourceReplicaPoolInstanceGroupManagerCreate,
        Read:   resourceReplicaPoolInstanceGroupManagerRead,
        Update: resourceReplicaPoolInstanceGroupManagerUpdate,
        Delete: resourceReplicaPoolInstanceGroupManagerDelete,

        Schema: map[string]*schema.Schema{
            "name": &schema.Schema{
                Type:     schema.TypeString,
                Required: true,
                ForceNew: true,
            },

            "description": &schema.Schema{
                Type:     schema.TypeString,
                Optional: true,
                ForceNew: true,
            },

            "base_instance_name": &schema.Schema{
                Type:     schema.TypeString,
                Required: true,
                ForceNew: true,
            },

            "current_size": &schema.Schema{
                Type:     schema.TypeInt,
                Computed: true,
            },

            "fingerprint": &schema.Schema{
                Type:     schema.TypeString,
                Computed: true,
            },

            "group": &schema.Schema{
                Type:     schema.TypeString,
                Computed: true,
            },

            "instance_template": &schema.Schema{
                Type:     schema.TypeString,
                Required: true,
            },

            "target_pools": &schema.Schema{
                Type:     schema.TypeSet,
                Required: true,
                Elem:     &schema.Schema{Type: schema.TypeString},
                Set: func(v interface{}) int {
                    return hashcode.String(v.(string))
                },
            },

            "size": &schema.Schema{
                Type:     schema.TypeInt,
                Required: true,
            },

            "target_size": &schema.Schema{
                Type:     schema.TypeInt,
                Computed: true,
            },

            "zone": &schema.Schema{
                Type:     schema.TypeString,
                Required: true,
                ForceNew: true,
            },

            "self_link": &schema.Schema{
                Type:     schema.TypeString,
                Computed: true,
            },
        },
    }
}

func waitOpZone(config *Config, op *replicapool.Operation, zone string,
    resource string, action string) (*replicapool.Operation, error) {

    w := &ReplicaPoolOperationWaiter{
        Service: config.clientReplicaPool,
        Op:      op,
        Project: config.Project,
        Zone:    zone,
    }
    state := w.Conf()
    state.Timeout = 2 * time.Minute
    state.MinTimeout = 1 * time.Second
    opRaw, err := state.WaitForState()
    if err != nil {
        return nil, fmt.Errorf("Error waiting for %s to %s: %s", resource, action, err)
    }
    return opRaw.(*replicapool.Operation), nil
}

func resourceReplicaPoolInstanceGroupManagerCreate(d *schema.ResourceData, meta interface{}) error {
    config := meta.(*Config)

    // Get group size
    var size int64
    if v, ok := d.GetOk("size"); ok {
        size = int64(v.(int))
    }

    // Build the parameter
    manager := &replicapool.InstanceGroupManager{
        Name:             d.Get("name").(string),
        BaseInstanceName: d.Get("base_instance_name").(string),
        InstanceTemplate: d.Get("instance_template").(string),
    }

    // Set optional fields
    if v, ok := d.GetOk("description"); ok {
        manager.Description = v.(string)
    }

    if attr := d.Get("target_pools").(*schema.Set); attr.Len() > 0 {
        var s []string
        for _, v := range attr.List() {
            s = append(s, v.(string))
        }
        manager.TargetPools = s
    }

    log.Printf("[DEBUG] InstanceGroupManager insert request: %#v", manager)
    op, err := config.clientReplicaPool.InstanceGroupManagers.Insert(
        config.Project, d.Get("zone").(string), size, manager).Do()
    if err != nil {
        return fmt.Errorf("Error creating InstanceGroupManager: %s", err)
    }

    // It probably maybe worked, so store the ID now
    d.SetId(manager.Name)

    // Wait for the operation to complete
    op, err = waitOpZone(config, op, d.Get("zone").(string), "InstanceGroupManager", "create")
    if err != nil {
        return err
    }
    if op.Error != nil {
        // The resource didn't actually create
        d.SetId("")
        // Return the error
        return ReplicaPoolOperationError(*op.Error)
    }

    return resourceReplicaPoolInstanceGroupManagerRead(d, meta)
}

func resourceReplicaPoolInstanceGroupManagerRead(d *schema.ResourceData, meta interface{}) error {
    config := meta.(*Config)

    manager, err := config.clientReplicaPool.InstanceGroupManagers.Get(
        config.Project, d.Get("zone").(string), d.Id()).Do()
    if err != nil {
        if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
            // The resource doesn't exist anymore
            d.SetId("")

            return nil
        }

        return fmt.Errorf("Error reading instance group manager: %s", err)
    }

    // Set computed fields
    d.Set("current_size", manager.CurrentSize)
    d.Set("fingerprint", manager.Fingerprint)
    d.Set("group", manager.Group)
    d.Set("target_size", manager.TargetSize)
    d.Set("self_link", manager.SelfLink)

    return nil
}

func resourceReplicaPoolInstanceGroupManagerUpdate(d *schema.ResourceData, meta interface{}) error {
    config := meta.(*Config)

    d.Partial(true)

    // If target_pools changes then update
    if d.HasChange("target_pools") {
        var targetPools []string
        if attr := d.Get("target_pools").(*schema.Set); attr.Len() > 0 {
            for _, v := range attr.List() {
                targetPools = append(targetPools, v.(string))
            }
        }

        // Build the parameter
        setTargetPools := &replicapool.InstanceGroupManagersSetTargetPoolsRequest{
            Fingerprint: d.Get("fingerprint").(string),
            TargetPools: targetPools,
        }

        op, err := config.clientReplicaPool.InstanceGroupManagers.SetTargetPools(
            config.Project, d.Get("zone").(string), d.Id(), setTargetPools).Do()
        if err != nil {
            return fmt.Errorf("Error updating InstanceGroupManager: %s", err)
        }

        // Wait for the operation to complete
        op, err = waitOpZone(config, op, d.Get("zone").(string), "InstanceGroupManager", "update TargetPools")
        if err != nil {
            return err
        }
        if op.Error != nil {
            return ReplicaPoolOperationError(*op.Error)
        }

        d.SetPartial("target_pools")
    }

    // If instance_template changes then update
    if d.HasChange("instance_template") {
        // Build the parameter
        setInstanceTemplate := &replicapool.InstanceGroupManagersSetInstanceTemplateRequest{
            InstanceTemplate: d.Get("instance_template").(string),
        }

        op, err := config.clientReplicaPool.InstanceGroupManagers.SetInstanceTemplate(
            config.Project, d.Get("zone").(string), d.Id(), setInstanceTemplate).Do()
        if err != nil {
            return fmt.Errorf("Error updating InstanceGroupManager: %s", err)
        }

        // Wait for the operation to complete
        op, err = waitOpZone(config, op, d.Get("zone").(string), "InstanceGroupManager", "update instance template")
        if err != nil {
            return err
        }
        if op.Error != nil {
            return ReplicaPoolOperationError(*op.Error)
        }

        d.SetPartial("instance_template")
    }

    // If size changes trigger a resize
    if d.HasChange("size") {
        var size int64
        if v, ok := d.GetOk("size"); ok {
            size = int64(v.(int))
        }

        op, err := config.clientReplicaPool.InstanceGroupManagers.Resize(
            config.Project, d.Get("zone").(string), d.Id(), size).Do()
        if err != nil {
            return fmt.Errorf("Error updating InstanceGroupManager: %s", err)
        }

        // Wait for the operation to complete
        op, err = waitOpZone(config, op, d.Get("zone").(string), "InstanceGroupManager", "update size")
        if err != nil {
            return err
        }
        if op.Error != nil {
            return ReplicaPoolOperationError(*op.Error)
        }

        d.SetPartial("size")
    }

    d.Partial(false)

    return resourceReplicaPoolInstanceGroupManagerRead(d, meta)
}

func resourceReplicaPoolInstanceGroupManagerDelete(d *schema.ResourceData, meta interface{}) error {
    config := meta.(*Config)

    zone := d.Get("zone").(string)
    op, err := config.clientReplicaPool.InstanceGroupManagers.Delete(config.Project, zone, d.Id()).Do()
    if err != nil {
        return fmt.Errorf("Error deleting instance group manager: %s", err)
    }

    // Wait for the operation to complete
    w := &ReplicaPoolOperationWaiter{
        Service: config.clientReplicaPool,
        Op:      op,
        Project: config.Project,
        Zone:    d.Get("zone").(string),
    }
    state := w.Conf()
    state.Timeout = 2 * time.Minute
    state.MinTimeout = 1 * time.Second
    opRaw, err := state.WaitForState()
    if err != nil {
        return fmt.Errorf("Error waiting for InstanceGroupManager to delete: %s", err)
    }
    op = opRaw.(*replicapool.Operation)
    if op.Error != nil {
        // The delete operation didn't complete cleanly
        d.SetId("")

        // Return the error
        return ReplicaPoolOperationError(*op.Error)
    }

    d.SetId("")
    return nil
}

@@ -0,0 +1,365 @@
package google

import (
    "fmt"
    "log"
    "testing"

    "code.google.com/p/google-api-go-client/replicapool/v1beta2"

    "github.com/hashicorp/terraform/helper/resource"
    "github.com/hashicorp/terraform/terraform"
)

func TestAccInstanceGroupManager_basic(t *testing.T) {
    var manager replicapool.InstanceGroupManager

    resource.Test(t, resource.TestCase{
        PreCheck:     func() { testAccPreCheck(t) },
        Providers:    testAccProviders,
        CheckDestroy: testAccCheckInstanceGroupManagerDestroy,
        Steps: []resource.TestStep{
            resource.TestStep{
                Config: testAccInstanceGroupManager_basic,
                Check: resource.ComposeTestCheckFunc(
                    testAccCheckInstanceGroupManagerExists(
                        "google_replicapool_instance_group_manager.foobar", &manager),
                ),
            },
        },
    })
}

func TestAccInstanceGroupManager_update(t *testing.T) {
    var manager replicapool.InstanceGroupManager

    resource.Test(t, resource.TestCase{
        PreCheck:     func() { testAccPreCheck(t) },
        Providers:    testAccProviders,
        CheckDestroy: testAccCheckInstanceGroupManagerDestroy,
        Steps: []resource.TestStep{
            resource.TestStep{
                Config: testAccInstanceGroupManager_basic,
                Check: resource.ComposeTestCheckFunc(
                    testAccCheckInstanceGroupManagerExists(
                        "google_replicapool_instance_group_manager.foobar", &manager),
                ),
            },
            resource.TestStep{
                Config: testAccInstanceGroupManager_update,
                Check: resource.ComposeTestCheckFunc(
                    testAccCheckInstanceGroupManagerExists(
                        "google_replicapool_instance_group_manager.foobar", &manager),
                ),
            },
            resource.TestStep{
                Config: testAccInstanceGroupManager_update2,
                Check: resource.ComposeTestCheckFunc(
                    testAccCheckInstanceGroupManagerExists(
                        "google_replicapool_instance_group_manager.foobar", &manager),
                    testAccCheckInstanceGroupManagerUpdated(
                        "google_replicapool_instance_group_manager.foobar", 3,
                        "google_compute_target_pool.foobaz", "terraform-test-foobaz"),
                ),
            },
        },
    })
}

func testAccCheckInstanceGroupManagerDestroy(s *terraform.State) error {
    config := testAccProvider.Meta().(*Config)

    for _, rs := range s.RootModule().Resources {
        if rs.Type != "google_replicapool_instance_group_manager" {
            continue
        }
        // If the Get call still succeeds, the manager was not destroyed
        _, err := config.clientReplicaPool.InstanceGroupManagers.Get(
            config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
        if err == nil {
            return fmt.Errorf("InstanceGroupManager still exists")
        }
    }

    return nil
}

func testAccCheckInstanceGroupManagerExists(n string, manager *replicapool.InstanceGroupManager) resource.TestCheckFunc {
    return func(s *terraform.State) error {
        rs, ok := s.RootModule().Resources[n]
        if !ok {
            return fmt.Errorf("Not found: %s", n)
        }

        if rs.Primary.ID == "" {
            return fmt.Errorf("No ID is set")
        }

        config := testAccProvider.Meta().(*Config)

        found, err := config.clientReplicaPool.InstanceGroupManagers.Get(
            config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
        if err != nil {
            return err
        }

        if found.Name != rs.Primary.ID {
            return fmt.Errorf("InstanceGroupManager not found")
        }

        *manager = *found

        return nil
    }
}

func testAccCheckInstanceGroupManagerUpdated(n string, size int64, targetPool string, template string) resource.TestCheckFunc {
    return func(s *terraform.State) error {
        rs, ok := s.RootModule().Resources[n]
        if !ok {
            return fmt.Errorf("Not found: %s", n)
        }

        if rs.Primary.ID == "" {
            return fmt.Errorf("No ID is set")
        }
        log.Printf("[DEBUG] Manager test resource state: %#v", rs)

        config := testAccProvider.Meta().(*Config)

        manager, err := config.clientReplicaPool.InstanceGroupManagers.Get(
            config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
        if err != nil {
            return err
        }

        // check that total instance count is "size"
        log.Printf("[DEBUG] Manager test target size: %#v", manager.TargetSize)
        if manager.CurrentSize != size {
            return fmt.Errorf("instance count incorrect")
        }

        // check that at least one instance exists in "targetpool"
        tp, ok := s.RootModule().Resources[targetPool]
        if !ok {
            return fmt.Errorf("Not found: %s", targetPool)
        }

        if tp.Primary.ID == "" {
            return fmt.Errorf("No ID is set")
        }
        log.Printf("[DEBUG] Manager test target pool state: %#v", tp)

        targetpool, err := config.clientCompute.TargetPools.Get(
            config.Project, config.Region, tp.Primary.ID).Do()
        if err != nil {
            return err
        }

        // check that the target pool received at least one instance
        log.Printf("[DEBUG] Manager test target pool instance count: %#v", len(targetpool.Instances))
        if len(targetpool.Instances) == 0 {
            return fmt.Errorf("no instance in new targetpool")
        }

        // check that the instance template updated
        instanceTemplate, err := config.clientCompute.InstanceTemplates.Get(
            config.Project, template).Do()
        if err != nil {
            return fmt.Errorf("Error reading instance template: %s", err)
        }

        if instanceTemplate.Name != template {
            return fmt.Errorf("instance template not updated")
        }

        return nil
    }
}

const testAccInstanceGroupManager_basic = `
resource "google_compute_instance_template" "foobar" {
    name = "terraform-test-foobar"
    machine_type = "n1-standard-1"
    can_ip_forward = false
    tags = ["foo", "bar"]

    disk {
        source_image = "projects/debian-cloud/global/images/debian-7-wheezy-v20140814"
        auto_delete = true
        boot = true
    }

    network {
        source = "default"
    }

    metadata {
        foo = "bar"
    }

    service_account {
        scopes = ["userinfo-email", "compute-ro", "storage-ro"]
    }
}

resource "google_compute_target_pool" "foobar" {
    description = "Resource created for Terraform acceptance testing"
    name = "terraform-test-foobar"
    session_affinity = "CLIENT_IP_PROTO"
}

resource "google_replicapool_instance_group_manager" "foobar" {
    description = "Terraform test instance group manager"
    name = "terraform-test"
    instance_template = "${google_compute_instance_template.foobar.self_link}"
    target_pools = ["${google_compute_target_pool.foobar.self_link}"]
    base_instance_name = "foobar"
    zone = "us-central1-a"
    size = 2
}`

const testAccInstanceGroupManager_update = `
resource "google_compute_instance_template" "foobar" {
    name = "terraform-test-foobar"
    machine_type = "n1-standard-1"
    can_ip_forward = false
    tags = ["foo", "bar"]

    disk {
        source_image = "projects/debian-cloud/global/images/debian-7-wheezy-v20140814"
        auto_delete = true
        boot = true
    }

    network {
        source = "default"
    }

    metadata {
        foo = "bar"
    }

    service_account {
        scopes = ["userinfo-email", "compute-ro", "storage-ro"]
    }
}

resource "google_compute_instance_template" "foobaz" {
    name = "terraform-test-foobaz"
    machine_type = "n1-standard-1"
    can_ip_forward = false
    tags = ["foo", "bar"]

    disk {
        source_image = "projects/debian-cloud/global/images/debian-7-wheezy-v20140814"
        auto_delete = true
        boot = true
    }

    network {
        source = "default"
    }

    metadata {
        foo = "bar"
    }

    service_account {
        scopes = ["userinfo-email", "compute-ro", "storage-ro"]
    }
}

resource "google_compute_target_pool" "foobar" {
    description = "Resource created for Terraform acceptance testing"
    name = "terraform-test-foobar"
    session_affinity = "CLIENT_IP_PROTO"
}

resource "google_compute_target_pool" "foobaz" {
    description = "Resource created for Terraform acceptance testing"
    name = "terraform-test-foobaz"
    session_affinity = "CLIENT_IP_PROTO"
}

resource "google_replicapool_instance_group_manager" "foobar" {
    description = "Terraform test instance group manager"
    name = "terraform-test"
    instance_template = "${google_compute_instance_template.foobar.self_link}"
    target_pools = ["${google_compute_target_pool.foobaz.self_link}"]
    base_instance_name = "foobar"
    zone = "us-central1-a"
    size = 2
}`

const testAccInstanceGroupManager_update2 = `
resource "google_compute_instance_template" "foobar" {
    name = "terraform-test-foobar"
    machine_type = "n1-standard-1"
    can_ip_forward = false
    tags = ["foo", "bar"]

    disk {
        source_image = "projects/debian-cloud/global/images/debian-7-wheezy-v20140814"
        auto_delete = true
        boot = true
    }

    network {
        source = "default"
    }

    metadata {
        foo = "bar"
    }

    service_account {
        scopes = ["userinfo-email", "compute-ro", "storage-ro"]
    }
}

resource "google_compute_instance_template" "foobaz" {
    name = "terraform-test-foobaz"
    machine_type = "n1-standard-1"
    can_ip_forward = false
    tags = ["foo", "bar"]

    disk {
        source_image = "projects/debian-cloud/global/images/debian-7-wheezy-v20140814"
        auto_delete = true
        boot = true
    }

    network {
        source = "default"
    }

    metadata {
        foo = "bar"
    }

    service_account {
        scopes = ["userinfo-email", "compute-ro", "storage-ro"]
    }
}

resource "google_compute_target_pool" "foobar" {
    description = "Resource created for Terraform acceptance testing"
    name = "terraform-test-foobar"
    session_affinity = "CLIENT_IP_PROTO"
}

resource "google_compute_target_pool" "foobaz" {
    description = "Resource created for Terraform acceptance testing"
    name = "terraform-test-foobaz"
    session_affinity = "CLIENT_IP_PROTO"
}

resource "google_replicapool_instance_group_manager" "foobar" {
    description = "Terraform test instance group manager"
    name = "terraform-test"
    instance_template = "${google_compute_instance_template.foobaz.self_link}"
    target_pools = ["${google_compute_target_pool.foobaz.self_link}"]
    base_instance_name = "foobar"
    zone = "us-central1-a"
    size = 3
}`