diff --git a/builtin/providers/google/config.go b/builtin/providers/google/config.go index 8803868f9..6af5fbd61 100644 --- a/builtin/providers/google/config.go +++ b/builtin/providers/google/config.go @@ -8,6 +8,7 @@ import ( "os" "runtime" + // TODO(dcunnin): Use version code from version.go // "github.com/hashicorp/terraform" "golang.org/x/oauth2" @@ -26,10 +27,10 @@ type Config struct { Project string Region string - clientCompute *compute.Service + clientCompute *compute.Service clientContainer *container.Service - clientDns *dns.Service - clientStorage *storage.Service + clientDns *dns.Service + clientStorage *storage.Service } func (c *Config) loadAndValidate() error { diff --git a/builtin/providers/google/operation.go b/builtin/providers/google/operation.go index b1f2f255b..aef4576c4 100644 --- a/builtin/providers/google/operation.go +++ b/builtin/providers/google/operation.go @@ -5,7 +5,6 @@ import ( "fmt" "google.golang.org/api/compute/v1" - "github.com/hashicorp/terraform/helper/resource" ) @@ -25,8 +24,8 @@ type OperationWaiter struct { Op *compute.Operation Project string Region string - Zone string Type OperationWaitType + Zone string } func (w *OperationWaiter) RefreshFunc() resource.StateRefreshFunc { @@ -78,3 +77,4 @@ func (e OperationError) Error() string { return buf.String() } + diff --git a/builtin/providers/google/provider.go b/builtin/providers/google/provider.go index b19d9fcea..30cef8c1b 100644 --- a/builtin/providers/google/provider.go +++ b/builtin/providers/google/provider.go @@ -29,20 +29,22 @@ func Provider() terraform.ResourceProvider { }, ResourcesMap: map[string]*schema.Resource{ - "google_compute_address": resourceComputeAddress(), - "google_compute_disk": resourceComputeDisk(), - "google_compute_firewall": resourceComputeFirewall(), - "google_compute_forwarding_rule": resourceComputeForwardingRule(), + "google_compute_autoscaler": resourceComputeAutoscaler(), + "google_compute_address": resourceComputeAddress(), + 
"google_compute_disk": resourceComputeDisk(), + "google_compute_firewall": resourceComputeFirewall(), + "google_compute_forwarding_rule": resourceComputeForwardingRule(), "google_compute_http_health_check": resourceComputeHttpHealthCheck(), - "google_compute_instance": resourceComputeInstance(), + "google_compute_instance": resourceComputeInstance(), "google_compute_instance_template": resourceComputeInstanceTemplate(), - "google_compute_network": resourceComputeNetwork(), - "google_compute_route": resourceComputeRoute(), - "google_compute_target_pool": resourceComputeTargetPool(), - "google_container_cluster": resourceContainerCluster(), - "google_dns_managed_zone": resourceDnsManagedZone(), - "google_dns_record_set": resourceDnsRecordSet(), - "google_storage_bucket": resourceStorageBucket(), + "google_compute_network": resourceComputeNetwork(), + "google_compute_route": resourceComputeRoute(), + "google_compute_target_pool": resourceComputeTargetPool(), + "google_container_cluster": resourceContainerCluster(), + "google_dns_managed_zone": resourceDnsManagedZone(), + "google_dns_record_set": resourceDnsRecordSet(), + "google_compute_instance_group_manager": resourceComputeInstanceGroupManager(), + "google_storage_bucket": resourceStorageBucket(), }, ConfigureFunc: providerConfigure, diff --git a/builtin/providers/google/resource_compute_autoscaler.go b/builtin/providers/google/resource_compute_autoscaler.go new file mode 100644 index 000000000..35c8167ff --- /dev/null +++ b/builtin/providers/google/resource_compute_autoscaler.go @@ -0,0 +1,352 @@ +package google + +import ( + "fmt" + "log" + "time" + + "google.golang.org/api/googleapi" + "google.golang.org/api/compute/v1" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeAutoscaler() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeAutoscalerCreate, + Read: resourceComputeAutoscalerRead, + Update: resourceComputeAutoscalerUpdate, + Delete: 
resourceComputeAutoscalerDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "target": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "autoscaling_policy": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "min_replicas": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + + "max_replicas": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + + "cooldown_period": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 60, + }, + + "cpu_utilization": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "target": &schema.Schema{ + Type: schema.TypeFloat, + Required: true, + }, + }, + }, + }, + + "metric": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "target": &schema.Schema{ + Type: schema.TypeFloat, + Required: true, + }, + + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + + "load_balancing_utilization": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "target": &schema.Schema{ + Type: schema.TypeFloat, + Required: true, + }, + }, + }, + }, + }, + }, + }, + + "zone": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func buildAutoscaler(d *schema.ResourceData) (*compute.Autoscaler, error) { + + // Build the parameter + scaler := &compute.Autoscaler{ + Name: d.Get("name").(string), + Target: 
d.Get("target").(string), + } + + // Optional fields + if v, ok := d.GetOk("description"); ok { + scaler.Description = v.(string) + } + + aspCount := d.Get("autoscaling_policy.#").(int) + if aspCount != 1 { + return nil, fmt.Errorf("The autoscaler must have exactly one autoscaling_policy, found %d.", aspCount) + } + + prefix := "autoscaling_policy.0." + + scaler.AutoscalingPolicy = &compute.AutoscalingPolicy{ + MaxNumReplicas: int64(d.Get(prefix + "max_replicas").(int)), + MinNumReplicas: int64(d.Get(prefix + "min_replicas").(int)), + CoolDownPeriodSec: int64(d.Get(prefix + "cooldown_period").(int)), + } + + // Check that only one autoscaling policy is defined + + policyCounter := 0 + if _, ok := d.GetOk(prefix + "cpu_utilization"); ok { + if d.Get(prefix+"cpu_utilization.0.target").(float64) != 0 { + cpuUtilCount := d.Get(prefix + "cpu_utilization.#").(int) + if cpuUtilCount != 1 { + return nil, fmt.Errorf("The autoscaling_policy must have exactly one cpu_utilization, found %d.", cpuUtilCount) + } + policyCounter++ + scaler.AutoscalingPolicy.CpuUtilization = &compute.AutoscalingPolicyCpuUtilization{ + UtilizationTarget: d.Get(prefix + "cpu_utilization.0.target").(float64), + } + } + } + if _, ok := d.GetOk("autoscaling_policy.0.metric"); ok { + if d.Get(prefix+"metric.0.name") != "" { + policyCounter++ + metricCount := d.Get(prefix + "metric.#").(int) + if metricCount != 1 { + return nil, fmt.Errorf("The autoscaling_policy must have exactly one metric, found %d.", metricCount) + } + scaler.AutoscalingPolicy.CustomMetricUtilizations = []*compute.AutoscalingPolicyCustomMetricUtilization{ + { + Metric: d.Get(prefix + "metric.0.name").(string), + UtilizationTarget: d.Get(prefix + "metric.0.target").(float64), + UtilizationTargetType: d.Get(prefix + "metric.0.type").(string), + }, + } + } + + } + if _, ok := d.GetOk("autoscaling_policy.0.load_balancing_utilization"); ok { + if d.Get(prefix+"load_balancing_utilization.0.target").(float64) != 0 { + policyCounter++ + 
lbuCount := d.Get(prefix + "load_balancing_utilization.#").(int) + if lbuCount != 1 { + return nil, fmt.Errorf("The autoscaling_policy must have exactly one load_balancing_utilization, found %d.", lbuCount) + } + scaler.AutoscalingPolicy.LoadBalancingUtilization = &compute.AutoscalingPolicyLoadBalancingUtilization{ + UtilizationTarget: d.Get(prefix + "load_balancing_utilization.0.target").(float64), + } + } + } + + if policyCounter != 1 { + return nil, fmt.Errorf("One policy must be defined for an autoscaler.") + } + + return scaler, nil +} + +func resourceComputeAutoscalerCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Get the zone + log.Printf("[DEBUG] Loading zone: %s", d.Get("zone").(string)) + zone, err := config.clientCompute.Zones.Get( + config.Project, d.Get("zone").(string)).Do() + if err != nil { + return fmt.Errorf( + "Error loading zone '%s': %s", d.Get("zone").(string), err) + } + + scaler, err := buildAutoscaler(d) + if err != nil { + return err + } + + op, err := config.clientCompute.Autoscalers.Insert( + config.Project, zone.Name, scaler).Do() + if err != nil { + return fmt.Errorf("Error creating Autoscaler: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(scaler.Name) + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Type: OperationWaitZone, + Zone: zone.Name, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for Autoscaler to create: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + + // Return the error + return OperationError(*op.Error) + } + + return resourceComputeAutoscalerRead(d, meta) +} + +func resourceComputeAutoscalerRead(d *schema.ResourceData, meta interface{}) error { 
+ config := meta.(*Config) + + zone := d.Get("zone").(string) + scaler, err := config.clientCompute.Autoscalers.Get( + config.Project, zone, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading Autoscaler: %s", err) + } + + d.Set("self_link", scaler.SelfLink) + + return nil +} + +func resourceComputeAutoscalerUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + zone := d.Get("zone").(string) + + scaler, err := buildAutoscaler(d) + if err != nil { + return err + } + + op, err := config.clientCompute.Autoscalers.Patch( + config.Project, zone, d.Id(), scaler).Do() + if err != nil { + return fmt.Errorf("Error updating Autoscaler: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(scaler.Name) + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Type: OperationWaitZone, + Zone: zone, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for Autoscaler to update: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // Return the error + return OperationError(*op.Error) + } + + return resourceComputeAutoscalerRead(d, meta) +} + +func resourceComputeAutoscalerDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + zone := d.Get("zone").(string) + op, err := config.clientCompute.Autoscalers.Delete( + config.Project, zone, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting autoscaler: %s", err) + } + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Type: OperationWaitZone, + Zone: zone, + } + state 
:= w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for Autoscaler to delete: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // Return the error + return OperationError(*op.Error) + } + + d.SetId("") + return nil +} diff --git a/builtin/providers/google/resource_compute_autoscaler_test.go b/builtin/providers/google/resource_compute_autoscaler_test.go new file mode 100644 index 000000000..fbc900510 --- /dev/null +++ b/builtin/providers/google/resource_compute_autoscaler_test.go @@ -0,0 +1,245 @@ +package google + +import ( + "fmt" + "testing" + + "google.golang.org/api/compute/v1" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAutoscaler_basic(t *testing.T) { + var ascaler compute.Autoscaler + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAutoscalerDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAutoscaler_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckAutoscalerExists( + "google_compute_autoscaler.foobar", &ascaler), + ), + }, + }, + }) +} + +func TestAccAutoscaler_update(t *testing.T) { + var ascaler compute.Autoscaler + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAutoscalerDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAutoscaler_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckAutoscalerExists( + "google_compute_autoscaler.foobar", &ascaler), + ), + }, + resource.TestStep{ + Config: testAccAutoscaler_update, + Check: resource.ComposeTestCheckFunc( + testAccCheckAutoscalerExists( + "google_compute_autoscaler.foobar", &ascaler), + testAccCheckAutoscalerUpdated( + 
"google_compute_autoscaler.foobar", 10), + ), + }, + }, + }) +} + +func testAccCheckAutoscalerDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_autoscaler" { + continue + } + + _, err := config.clientCompute.Autoscalers.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("Autoscaler still exists") + } + } + + return nil +} + +func testAccCheckAutoscalerExists(n string, ascaler *compute.Autoscaler) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.Autoscalers.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("Autoscaler not found") + } + + *ascaler = *found + + return nil + } +} + +func testAccCheckAutoscalerUpdated(n string, max int64) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + ascaler, err := config.clientCompute.Autoscalers.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + if err != nil { + return err + } + + if ascaler.AutoscalingPolicy.MaxNumReplicas != max { + return fmt.Errorf("maximum replicas incorrect") + } + + return nil + } +} + +const testAccAutoscaler_basic = ` +resource "google_compute_instance_template" "foobar" { + name = "terraform-test-template-foobar" + machine_type = "n1-standard-1" + can_ip_forward = false + tags = ["foo", "bar"] + + disk 
{ + source_image = "debian-cloud/debian-7-wheezy-v20140814" + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_target_pool" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "terraform-test-tpool-foobar" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_compute_instance_group_manager" "foobar" { + description = "Terraform test instance group manager" + name = "terraform-test-groupmanager" + instance_template = "${google_compute_instance_template.foobar.self_link}" + target_pools = ["${google_compute_target_pool.foobar.self_link}"] + base_instance_name = "foobar" + zone = "us-central1-a" +} + +resource "google_compute_autoscaler" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "terraform-test-ascaler" + zone = "us-central1-a" + target = "${google_compute_instance_group_manager.foobar.self_link}" + autoscaling_policy = { + max_replicas = 5 + min_replicas = 0 + cooldown_period = 60 + cpu_utilization = { + target = 0.5 + } + } + +}` + +const testAccAutoscaler_update = ` +resource "google_compute_instance_template" "foobar" { + name = "terraform-test-template-foobar" + machine_type = "n1-standard-1" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = "debian-cloud/debian-7-wheezy-v20140814" + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_target_pool" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "terraform-test-tpool-foobar" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_compute_instance_group_manager" "foobar" { + description = 
"Terraform test instance group manager" + name = "terraform-test-groupmanager" + instance_template = "${google_compute_instance_template.foobar.self_link}" + target_pools = ["${google_compute_target_pool.foobar.self_link}"] + base_instance_name = "foobar" + zone = "us-central1-a" +} + +resource "google_compute_autoscaler" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "terraform-test-ascaler" + zone = "us-central1-a" + target = "${google_compute_instance_group_manager.foobar.self_link}" + autoscaling_policy = { + max_replicas = 10 + min_replicas = 0 + cooldown_period = 60 + cpu_utilization = { + target = 0.5 + } + } + +}` diff --git a/builtin/providers/google/resource_compute_instance_group_manager.go b/builtin/providers/google/resource_compute_instance_group_manager.go new file mode 100644 index 000000000..ca0967e37 --- /dev/null +++ b/builtin/providers/google/resource_compute_instance_group_manager.go @@ -0,0 +1,301 @@ +package google + +import ( + "fmt" + "log" + "time" + + "google.golang.org/api/googleapi" + "google.golang.org/api/compute/v1" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeInstanceGroupManager() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeInstanceGroupManagerCreate, + Read: resourceComputeInstanceGroupManagerRead, + Update: resourceComputeInstanceGroupManagerUpdate, + Delete: resourceComputeInstanceGroupManagerDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "base_instance_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "instance_group": &schema.Schema{ + Type: 
schema.TypeString, + Computed: true, + }, + + "instance_template": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "target_pools": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: func(v interface{}) int { + return hashcode.String(v.(string)) + }, + }, + + "target_size": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Optional: true, + }, + + "zone": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func waitOpZone(config *Config, op *compute.Operation, zone string, + resource string, action string) (*compute.Operation, error) { + + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Zone: zone, + Type: OperationWaitZone, + } + state := w.Conf() + state.Timeout = 8 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return nil, fmt.Errorf("Error waiting for %s to %s: %s", resource, action, err) + } + return opRaw.(*compute.Operation), nil +} + +func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Get group size, default to 1 if not given + var target_size int64 = 1 + if v, ok := d.GetOk("target_size"); ok { + target_size = int64(v.(int)) + } + + // Build the parameter + manager := &compute.InstanceGroupManager{ + Name: d.Get("name").(string), + BaseInstanceName: d.Get("base_instance_name").(string), + InstanceTemplate: d.Get("instance_template").(string), + TargetSize: target_size, + } + + // Set optional fields + if v, ok := d.GetOk("description"); ok { + manager.Description = v.(string) + } + + if attr := d.Get("target_pools").(*schema.Set); attr.Len() > 0 { + var s []string + for _, v := range attr.List() { + s = append(s, v.(string)) + } + manager.TargetPools = 
s + } + + log.Printf("[DEBUG] InstanceGroupManager insert request: %#v", manager) + op, err := config.clientCompute.InstanceGroupManagers.Insert( + config.Project, d.Get("zone").(string), manager).Do() + if err != nil { + return fmt.Errorf("Error creating InstanceGroupManager: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(manager.Name) + + // Wait for the operation to complete + op, err = waitOpZone(config, op, d.Get("zone").(string), "InstanceGroupManager", "create") + if err != nil { + return err + } + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + // Return the error + return OperationError(*op.Error) + } + + return resourceComputeInstanceGroupManagerRead(d, meta) +} + +func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + manager, err := config.clientCompute.InstanceGroupManagers.Get( + config.Project, d.Get("zone").(string), d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading instance group manager: %s", err) + } + + // Set computed fields + d.Set("fingerprint", manager.Fingerprint) + d.Set("instance_group", manager.InstanceGroup) + d.Set("target_size", manager.TargetSize) + d.Set("self_link", manager.SelfLink) + + return nil +} +func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + d.Partial(true) + + // If target_pools changes then update + if d.HasChange("target_pools") { + var targetPools []string + if attr := d.Get("target_pools").(*schema.Set); attr.Len() > 0 { + for _, v := range attr.List() { + targetPools = append(targetPools, v.(string)) + } + } + + // Build the parameter + setTargetPools := &compute.InstanceGroupManagersSetTargetPoolsRequest{ + Fingerprint: d.Get("fingerprint").(string), + 
TargetPools: targetPools, + } + + op, err := config.clientCompute.InstanceGroupManagers.SetTargetPools( + config.Project, d.Get("zone").(string), d.Id(), setTargetPools).Do() + if err != nil { + return fmt.Errorf("Error updating InstanceGroupManager: %s", err) + } + + // Wait for the operation to complete + op, err = waitOpZone(config, op, d.Get("zone").(string), "InstanceGroupManager", "update TargetPools") + if err != nil { + return err + } + if op.Error != nil { + return OperationError(*op.Error) + } + + d.SetPartial("target_pools") + } + + // If instance_template changes then update + if d.HasChange("instance_template") { + // Build the parameter + setInstanceTemplate := &compute.InstanceGroupManagersSetInstanceTemplateRequest{ + InstanceTemplate: d.Get("instance_template").(string), + } + + op, err := config.clientCompute.InstanceGroupManagers.SetInstanceTemplate( + config.Project, d.Get("zone").(string), d.Id(), setInstanceTemplate).Do() + if err != nil { + return fmt.Errorf("Error updating InstanceGroupManager: %s", err) + } + + // Wait for the operation to complete + op, err = waitOpZone(config, op, d.Get("zone").(string), "InstanceGroupManager", "update instance template") + if err != nil { + return err + } + if op.Error != nil { + return OperationError(*op.Error) + } + + d.SetPartial("instance_template") + } + + // If size changes trigger a resize + if d.HasChange("target_size") { + if v, ok := d.GetOk("target_size"); ok { + // Only do anything if the new size is set + target_size := int64(v.(int)) + + op, err := config.clientCompute.InstanceGroupManagers.Resize( + config.Project, d.Get("zone").(string), d.Id(), target_size).Do() + if err != nil { + return fmt.Errorf("Error updating InstanceGroupManager: %s", err) + } + + // Wait for the operation to complete + op, err = waitOpZone(config, op, d.Get("zone").(string), "InstanceGroupManager", "update target_size") + if err != nil { + return err + } + if op.Error != nil { + return OperationError(*op.Error) + 
} + } + + d.SetPartial("target_size") + } + + d.Partial(false) + + return resourceComputeInstanceGroupManagerRead(d, meta) +} + +func resourceComputeInstanceGroupManagerDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + zone := d.Get("zone").(string) + op, err := config.clientCompute.InstanceGroupManagers.Delete(config.Project, zone, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting instance group manager: %s", err) + } + + // Wait for the operation to complete + op, err = waitOpZone(config, op, d.Get("zone").(string), "InstanceGroupManager", "delete") + if err != nil { + return err + } + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + + // Return the error + return OperationError(*op.Error) + } + + d.SetId("") + return nil +} diff --git a/builtin/providers/google/resource_compute_instance_group_manager_test.go b/builtin/providers/google/resource_compute_instance_group_manager_test.go new file mode 100644 index 000000000..d1cf89a2d --- /dev/null +++ b/builtin/providers/google/resource_compute_instance_group_manager_test.go @@ -0,0 +1,298 @@ +package google + +import ( + "fmt" + "testing" + + "google.golang.org/api/compute/v1" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccInstanceGroupManager_basic(t *testing.T) { + var manager compute.InstanceGroupManager + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckInstanceGroupManagerDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccInstanceGroupManager_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceGroupManagerExists( + "google_compute_instance_group_manager.igm-basic", &manager), + ), + }, + }, + }) +} + +func TestAccInstanceGroupManager_update(t *testing.T) { + var manager compute.InstanceGroupManager + + resource.Test(t, 
resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckInstanceGroupManagerDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccInstanceGroupManager_update, + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceGroupManagerExists( + "google_compute_instance_group_manager.igm-update", &manager), + ), + }, + resource.TestStep{ + Config: testAccInstanceGroupManager_update2, + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceGroupManagerExists( + "google_compute_instance_group_manager.igm-update", &manager), + testAccCheckInstanceGroupManagerUpdated( + "google_compute_instance_group_manager.igm-update", 3, + "google_compute_target_pool.igm-update", "terraform-test-igm-update2"), + ), + }, + }, + }) +} + +func testAccCheckInstanceGroupManagerDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_instance_group_manager" { + continue + } + _, err := config.clientCompute.InstanceGroupManagers.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + if err != nil { + return fmt.Errorf("InstanceGroupManager still exists") + } + } + + return nil +} + +func testAccCheckInstanceGroupManagerExists(n string, manager *compute.InstanceGroupManager) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.InstanceGroupManagers.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("InstanceGroupManager not found") + } + + *manager = *found + + return nil + } +} + +func 
testAccCheckInstanceGroupManagerUpdated(n string, size int64, targetPool string, template string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + manager, err := config.clientCompute.InstanceGroupManagers.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + if err != nil { + return err + } + + // Cannot check the target pool as the instance creation is asynchronous. However, can + // check the target_size. + if manager.TargetSize != size { + return fmt.Errorf("instance count incorrect") + } + + // check that the instance template updated + instanceTemplate, err := config.clientCompute.InstanceTemplates.Get( + config.Project, template).Do() + if err != nil { + return fmt.Errorf("Error reading instance template: %s", err) + } + + if instanceTemplate.Name != template { + return fmt.Errorf("instance template not updated") + } + + return nil + } +} + +const testAccInstanceGroupManager_basic = ` +resource "google_compute_instance_template" "igm-basic" { + name = "terraform-test-igm-basic" + machine_type = "n1-standard-1" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = "debian-cloud/debian-7-wheezy-v20140814" + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_target_pool" "igm-basic" { + description = "Resource created for Terraform acceptance testing" + name = "terraform-test-igm-basic" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_compute_instance_group_manager" "igm-basic" { + description = "Terraform test instance group manager" + name = "terraform-test-igm-basic" + instance_template = 
"${google_compute_instance_template.igm-basic.self_link}" + target_pools = ["${google_compute_target_pool.igm-basic.self_link}"] + base_instance_name = "igm-basic" + zone = "us-central1-c" + target_size = 2 +}` + +const testAccInstanceGroupManager_update = ` +resource "google_compute_instance_template" "igm-update" { + name = "terraform-test-igm-update" + machine_type = "n1-standard-1" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = "debian-cloud/debian-7-wheezy-v20140814" + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_target_pool" "igm-update" { + description = "Resource created for Terraform acceptance testing" + name = "terraform-test-igm-update" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_compute_instance_group_manager" "igm-update" { + description = "Terraform test instance group manager" + name = "terraform-test-igm-update" + instance_template = "${google_compute_instance_template.igm-update.self_link}" + target_pools = ["${google_compute_target_pool.igm-update.self_link}"] + base_instance_name = "igm-update" + zone = "us-central1-c" + target_size = 2 +}` + +// Change IGM's instance template and target size +const testAccInstanceGroupManager_update2 = ` +resource "google_compute_instance_template" "igm-update" { + name = "terraform-test-igm-update" + machine_type = "n1-standard-1" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = "debian-cloud/debian-7-wheezy-v20140814" + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_target_pool" "igm-update" { + description = "Resource created for Terraform acceptance testing" + 
name = "terraform-test-igm-update" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_compute_instance_template" "igm-update2" { + name = "terraform-test-igm-update2" + machine_type = "n1-standard-1" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = "debian-cloud/debian-7-wheezy-v20140814" + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_instance_group_manager" "igm-update" { + description = "Terraform test instance group manager" + name = "terraform-test-igm-update" + instance_template = "${google_compute_instance_template.igm-update2.self_link}" + target_pools = ["${google_compute_target_pool.igm-update.self_link}"] + base_instance_name = "igm-update" + zone = "us-central1-c" + target_size = 3 +}` + diff --git a/builtin/providers/google/resource_compute_instance_template.go b/builtin/providers/google/resource_compute_instance_template.go index 4069da104..cf1103402 100644 --- a/builtin/providers/google/resource_compute_instance_template.go +++ b/builtin/providers/google/resource_compute_instance_template.go @@ -227,7 +227,9 @@ func resourceComputeInstanceTemplate() *schema.Resource { } } -func buildDisks(d *schema.ResourceData, meta interface{}) []*compute.AttachedDisk { +func buildDisks(d *schema.ResourceData, meta interface{}) ([]*compute.AttachedDisk, error) { + config := meta.(*Config) + disksCount := d.Get("disk.#").(int) disks := make([]*compute.AttachedDisk, 0, disksCount) @@ -267,7 +269,14 @@ func buildDisks(d *schema.ResourceData, meta interface{}) []*compute.AttachedDis } if v, ok := d.GetOk(prefix + ".source_image"); ok { - disk.InitializeParams.SourceImage = v.(string) + imageName := v.(string) + imageUrl, err := resolveImage(config, imageName) + if err != nil { + return nil, fmt.Errorf( + "Error resolving image name '%s': %s", + 
imageName, err) + } + disk.InitializeParams.SourceImage = imageUrl } } @@ -286,7 +295,7 @@ func buildDisks(d *schema.ResourceData, meta interface{}) []*compute.AttachedDis disks = append(disks, &disk) } - return disks + return disks, nil } func buildNetworks(d *schema.ResourceData, meta interface{}) (error, []*compute.NetworkInterface) { @@ -330,7 +339,11 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac instanceProperties.CanIpForward = d.Get("can_ip_forward").(bool) instanceProperties.Description = d.Get("instance_description").(string) instanceProperties.MachineType = d.Get("machine_type").(string) - instanceProperties.Disks = buildDisks(d, meta) + disks, err := buildDisks(d, meta) + if err != nil { + return err + } + instanceProperties.Disks = disks metadata, err := resourceInstanceMetadata(d) if err != nil { return err diff --git a/builtin/providers/google/resource_compute_instance_template_test.go b/builtin/providers/google/resource_compute_instance_template_test.go index c552b125c..c86ea2059 100644 --- a/builtin/providers/google/resource_compute_instance_template_test.go +++ b/builtin/providers/google/resource_compute_instance_template_test.go @@ -24,7 +24,7 @@ func TestAccComputeInstanceTemplate_basic(t *testing.T) { "google_compute_instance_template.foobar", &instanceTemplate), testAccCheckComputeInstanceTemplateTag(&instanceTemplate, "foo"), testAccCheckComputeInstanceTemplateMetadata(&instanceTemplate, "foo", "bar"), - testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "debian-7-wheezy-v20140814", true, true), + testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20140814", true, true), ), }, }, @@ -64,7 +64,7 @@ func TestAccComputeInstanceTemplate_disks(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceTemplateExists( "google_compute_instance_template.foobar", &instanceTemplate), - 
testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "debian-7-wheezy-v20140814", true, true), + testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20140814", true, true), testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "terraform-test-foobar", false, false), ), }, diff --git a/website/source/docs/providers/google/r/compute_autoscaler.html.markdown b/website/source/docs/providers/google/r/compute_autoscaler.html.markdown new file mode 100644 index 000000000..6809cd10a --- /dev/null +++ b/website/source/docs/providers/google/r/compute_autoscaler.html.markdown @@ -0,0 +1,135 @@ +--- +layout: "google" +page_title: "Google: google_compute_autoscaler" +sidebar_current: "docs-google-resource-compute-autoscaler" +description: |- + Manages an Autoscaler within GCE. +--- + +# google\_compute\_autoscaler + +A Compute Engine Autoscaler automatically adds or removes virtual machines from +a managed instance group based on increases or decreases in load. This allows +your applications to gracefully handle increases in traffic and reduces cost +when the need for resources is lower. You just define the autoscaling policy and +the autoscaler performs automatic scaling based on the measured load. 
For more
+information, see [the official
+documentation](https://cloud.google.com/compute/docs/autoscaler/) and
+[API](https://cloud.google.com/compute/docs/autoscaler/v1beta2/autoscalers)
+
+
+## Example Usage
+
+```
+resource "google_compute_instance_template" "foobar" {
+	name = "foobar"
+	machine_type = "n1-standard-1"
+	can_ip_forward = false
+	tags = ["foo", "bar"]
+
+	disk {
+		source_image = "debian-cloud/debian-7-wheezy-v20140814"
+	}
+
+	network_interface {
+		network = "default"
+	}
+
+	metadata {
+		foo = "bar"
+	}
+
+	service_account {
+		scopes = ["userinfo-email", "compute-ro", "storage-ro"]
+	}
+}
+
+resource "google_compute_target_pool" "foobar" {
+	name = "foobar"
+}
+
+resource "google_compute_instance_group_manager" "foobar" {
+	name = "foobar"
+	instance_template = "${google_compute_instance_template.foobar.self_link}"
+	target_pools = ["${google_compute_target_pool.foobar.self_link}"]
+	base_instance_name = "foobar"
+	zone = "us-central1-f"
+}
+
+resource "google_compute_autoscaler" "foobar" {
+	name = "foobar"
+	zone = "us-central1-f"
+	target = "${google_compute_instance_group_manager.foobar.self_link}"
+	autoscaling_policy = {
+		max_replicas = 5
+		min_replicas = 1
+		cooldown_period = 60
+		cpu_utilization = {
+			target = 0.5
+		}
+	}
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `description` - (Optional) An optional textual description of the
+autoscaler.
+
+* `target` - (Required) The full URL to the instance group manager whose size we
+  control.
+
+* `autoscaling_policy.` - (Required) The parameters of the autoscaling
+  algorithm. Structure is documented below.
+
+* `zone` - (Required) The zone of the target.
+
+The `autoscaling_policy` block contains:
+
+* `max_replicas` - (Required) The group will never be larger than this.
+
+* `min_replicas` - (Required) The group will never be smaller than this.
+
+* `cooldown_period` - (Optional) Period to wait between changes. 
This should be
+  at least double the time your instances take to start up.
+
+* `cpu_utilization` - (Optional) A policy that scales when the cluster's average
+  CPU is above or below a given threshold. Structure is documented below.
+
+* `metric` - (Optional) A policy that scales according to Google Cloud
+  Monitoring metrics. Structure is documented below.
+
+* `load_balancing_utilization` - (Optional) A policy that scales when the load
+  reaches a proportion of a limit defined in the HTTP load balancer. Structure
+is documented below.
+
+The `cpu_utilization` block contains:
+
+* `target` - The floating point threshold where CPU utilization should be. E.g.
+  for 50% one would specify 0.5.
+
+The `metric` block contains (more documentation
+[here](https://cloud.google.com/monitoring/api/metrics)):
+
+* `name` - The name of the Google Cloud Monitoring metric to follow, e.g.
+  compute.googleapis.com/instance/network/received_bytes_count
+
+* `type` - Either "cumulative", "delta", or "gauge".
+
+* `target` - The desired metric value per instance. Must be a positive value.
+
+The `load_balancing_utilization` block contains:
+
+* `target` - The floating point threshold where load balancing utilization
+  should be. E.g. if the load balancer's `maxRatePerInstance` is 10 requests
+  per second (RPS) then setting this to 0.5 would cause the group to be scaled
+  such that each instance receives 5 RPS.
+
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `self_link` - The URL of the created resource. 
diff --git a/website/source/docs/providers/google/r/compute_instance_group_manager.html.markdown b/website/source/docs/providers/google/r/compute_instance_group_manager.html.markdown
new file mode 100644
index 000000000..abaed7a58
--- /dev/null
+++ b/website/source/docs/providers/google/r/compute_instance_group_manager.html.markdown
@@ -0,0 +1,65 @@
+---
+layout: "google"
+page_title: "Google: google_compute_instance_group_manager"
+sidebar_current: "docs-google-resource-compute-instance_group_manager"
+description: |-
+  Manages an Instance Group within GCE.
+---
+
+# google\_compute\_instance\_group\_manager
+
+The Google Compute Engine Instance Group Manager API creates and manages pools
+of homogeneous Compute Engine virtual machine instances from a common instance
+template. For more information, see [the official documentation](https://cloud.google.com/compute/docs/instance-groups/manager)
+and [API](https://cloud.google.com/compute/docs/instance-groups/manager/v1beta2/instanceGroupManagers)
+
+## Example Usage
+
+```
+resource "google_compute_instance_group_manager" "foobar" {
+	description = "Terraform test instance group manager"
+	name = "terraform-test"
+	instance_template = "${google_compute_instance_template.foobar.self_link}"
+	target_pools = ["${google_compute_target_pool.foobar.self_link}"]
+	base_instance_name = "foobar"
+	zone = "us-central1-a"
+	target_size = 2
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `base_instance_name` - (Required) The base instance name to use for
+instances in this group. The value must be a valid [RFC1035](https://www.ietf.org/rfc/rfc1035.txt) name.
+Supported characters are lowercase letters, numbers, and hyphens (-). Instances
+are named by appending a hyphen and a random four-character string to the base
+instance name.
+
+* `description` - (Optional) An optional textual description of the instance
+group manager. 
+ +* `instance_template` - (Required) The full URL to an instance template from +which all new instances will be created. + +* `name` - (Required) The name of the instance group manager. Must be 1-63 +characters long and comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). +Supported characters include lowercase letters, numbers, and hyphens. + +* `target_size` - (Optional) If not given at creation time, this defaults to 1. Do not specify this + if you are managing the group with an autoscaler, as this will cause fighting. + +* `target_pools` - (Required) The full URL of all target pools to which new +instances in the group are added. Updating the target pool values does not +affect existing instances. + +* `zone` - (Required) The zone that instances in this group should be created in. + +## Attributes Reference + +The following attributes are exported: + +* `instance_group` - The full URL of the instance group created by the manager. + +* `self_link` - The URL of the created resource. diff --git a/website/source/layouts/google.erb b/website/source/layouts/google.erb index be706a745..f26e5e0ff 100644 --- a/website/source/layouts/google.erb +++ b/website/source/layouts/google.erb @@ -64,6 +64,15 @@ > google_dns_record_set + + > + google_compute_instance_group_manager + + + > + google_compute_autoscaler + + > google_storage_bucket