// resource_aws_autoscaling_group.go
// Terraform resource implementation for AWS Auto Scaling Groups.
package aws
import (
	"fmt"
	"log"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/autoscaling"
	"github.com/aws/aws-sdk-go/service/elb"
	"github.com/hashicorp/errwrap"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsAutoscalingGroup() *schema.Resource {
return &schema.Resource{
Create: resourceAwsAutoscalingGroupCreate,
Read: resourceAwsAutoscalingGroupRead,
Update: resourceAwsAutoscalingGroupUpdate,
Delete: resourceAwsAutoscalingGroupDelete,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
// https://github.com/boto/botocore/blob/9f322b1/botocore/data/autoscaling/2011-01-01/service-2.json#L1862-L1873
value := v.(string)
if len(value) > 255 {
errors = append(errors, fmt.Errorf(
"%q cannot be longer than 255 characters", k))
}
return
},
},
"launch_configuration": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"desired_capacity": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"min_elb_capacity": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
},
"min_size": &schema.Schema{
Type: schema.TypeInt,
Required: true,
},
"max_size": &schema.Schema{
Type: schema.TypeInt,
Required: true,
},
"default_cooldown": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"force_delete": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"health_check_grace_period": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
Default: 300,
},
"health_check_type": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"availability_zones": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
"placement_group": &schema.Schema{
2015-11-02 16:26:25 +01:00
Type: schema.TypeString,
Optional: true,
},
"load_balancers": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
"vpc_zone_identifier": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
2014-10-23 23:58:54 +02:00
"termination_policies": &schema.Schema{
Type: schema.TypeList,
2014-10-23 23:58:54 +02:00
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
2015-03-03 23:36:25 +01:00
"wait_for_capacity_timeout": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Default: "10m",
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
duration, err := time.ParseDuration(value)
if err != nil {
errors = append(errors, fmt.Errorf(
"%q cannot be parsed as a duration: %s", k, err))
}
if duration < 0 {
errors = append(errors, fmt.Errorf(
"%q must be greater than zero", k))
}
return
},
},
"wait_for_elb_capacity": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
},
"enabled_metrics": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
"metrics_granularity": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Default: "1Minute",
},
"protect_from_scale_in": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"target_group_arns": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
"arn": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"initial_lifecycle_hook": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
},
"default_result": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"heartbeat_timeout": {
Type: schema.TypeInt,
Optional: true,
},
"lifecycle_transition": {
Type: schema.TypeString,
Required: true,
},
"notification_metadata": {
Type: schema.TypeString,
Optional: true,
},
"notification_target_arn": {
Type: schema.TypeString,
Optional: true,
},
"role_arn": {
Type: schema.TypeString,
Optional: true,
},
},
},
},
2015-03-03 23:36:25 +01:00
"tag": autoscalingTagsSchema(),
},
2014-07-10 01:00:11 +02:00
}
}
// generatePutLifecycleHookInputs converts the raw "initial_lifecycle_hook"
// config blocks into PutLifecycleHook API inputs for the named ASG. Optional
// attributes are only populated when set to a non-zero value.
func generatePutLifecycleHookInputs(asgName string, cfgs []interface{}) []autoscaling.PutLifecycleHookInput {
	inputs := make([]autoscaling.PutLifecycleHookInput, 0, len(cfgs))

	for _, rawCfg := range cfgs {
		cfg := rawCfg.(map[string]interface{})

		input := autoscaling.PutLifecycleHookInput{
			AutoScalingGroupName: &asgName,
			LifecycleHookName:    aws.String(cfg["name"].(string)),
		}

		// Optional string attributes are only sent when non-empty.
		optStr := func(key string) (*string, bool) {
			if v, ok := cfg[key]; ok && v.(string) != "" {
				return aws.String(v.(string)), true
			}
			return nil, false
		}

		if p, ok := optStr("default_result"); ok {
			input.DefaultResult = p
		}
		if v, ok := cfg["heartbeat_timeout"]; ok && v.(int) > 0 {
			input.HeartbeatTimeout = aws.Int64(int64(v.(int)))
		}
		if p, ok := optStr("lifecycle_transition"); ok {
			input.LifecycleTransition = p
		}
		if p, ok := optStr("notification_metadata"); ok {
			input.NotificationMetadata = p
		}
		if p, ok := optStr("notification_target_arn"); ok {
			input.NotificationTargetARN = p
		}
		if p, ok := optStr("role_arn"); ok {
			input.RoleARN = p
		}

		inputs = append(inputs, input)
	}

	return inputs
}
func resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).autoscalingconn
2014-07-10 01:00:11 +02:00
var asgName string
if v, ok := d.GetOk("name"); ok {
asgName = v.(string)
} else {
asgName = resource.PrefixedUniqueId("tf-asg-")
d.Set("name", asgName)
}
createOpts := autoscaling.CreateAutoScalingGroupInput{
AutoScalingGroupName: aws.String(asgName),
LaunchConfigurationName: aws.String(d.Get("launch_configuration").(string)),
NewInstancesProtectedFromScaleIn: aws.Bool(d.Get("protect_from_scale_in").(bool)),
}
updateOpts := autoscaling.UpdateAutoScalingGroupInput{
AutoScalingGroupName: aws.String(asgName),
}
initialLifecycleHooks := d.Get("initial_lifecycle_hook").(*schema.Set).List()
twoPhases := len(initialLifecycleHooks) > 0
minSize := aws.Int64(int64(d.Get("min_size").(int)))
maxSize := aws.Int64(int64(d.Get("max_size").(int)))
if twoPhases {
createOpts.MinSize = aws.Int64(int64(0))
createOpts.MaxSize = aws.Int64(int64(0))
updateOpts.MinSize = minSize
updateOpts.MaxSize = maxSize
if v, ok := d.GetOk("desired_capacity"); ok {
updateOpts.DesiredCapacity = aws.Int64(int64(v.(int)))
}
} else {
createOpts.MinSize = minSize
createOpts.MaxSize = maxSize
if v, ok := d.GetOk("desired_capacity"); ok {
createOpts.DesiredCapacity = aws.Int64(int64(v.(int)))
}
}
// Availability Zones are optional if VPC Zone Identifer(s) are specified
if v, ok := d.GetOk("availability_zones"); ok && v.(*schema.Set).Len() > 0 {
createOpts.AvailabilityZones = expandStringList(v.(*schema.Set).List())
}
2015-03-03 23:36:25 +01:00
if v, ok := d.GetOk("tag"); ok {
createOpts.Tags = autoscalingTagsFromMap(
2015-03-03 23:36:25 +01:00
setToMapByKey(v.(*schema.Set), "key"), d.Get("name").(string))
}
if v, ok := d.GetOk("default_cooldown"); ok {
createOpts.DefaultCooldown = aws.Int64(int64(v.(int)))
2014-07-10 01:00:11 +02:00
}
if v, ok := d.GetOk("health_check_type"); ok && v.(string) != "" {
createOpts.HealthCheckType = aws.String(v.(string))
2014-07-10 01:00:11 +02:00
}
if v, ok := d.GetOk("health_check_grace_period"); ok {
createOpts.HealthCheckGracePeriod = aws.Int64(int64(v.(int)))
2014-07-10 01:00:11 +02:00
}
if v, ok := d.GetOk("placement_group"); ok {
createOpts.PlacementGroup = aws.String(v.(string))
}
2014-07-10 01:00:11 +02:00
if v, ok := d.GetOk("load_balancers"); ok && v.(*schema.Set).Len() > 0 {
createOpts.LoadBalancerNames = expandStringList(
v.(*schema.Set).List())
2014-07-10 01:00:11 +02:00
}
if v, ok := d.GetOk("vpc_zone_identifier"); ok && v.(*schema.Set).Len() > 0 {
createOpts.VPCZoneIdentifier = expandVpcZoneIdentifiers(v.(*schema.Set).List())
2014-07-10 01:00:11 +02:00
}
if v, ok := d.GetOk("termination_policies"); ok && len(v.([]interface{})) > 0 {
createOpts.TerminationPolicies = expandStringList(v.([]interface{}))
2014-10-23 23:58:54 +02:00
}
2014-07-10 01:00:11 +02:00
if v, ok := d.GetOk("target_group_arns"); ok && len(v.(*schema.Set).List()) > 0 {
createOpts.TargetGroupARNs = expandStringList(v.(*schema.Set).List())
}
log.Printf("[DEBUG] AutoScaling Group create configuration: %#v", createOpts)
_, err := conn.CreateAutoScalingGroup(&createOpts)
2014-07-10 01:00:11 +02:00
if err != nil {
return fmt.Errorf("Error creating AutoScaling Group: %s", err)
2014-07-10 01:00:11 +02:00
}
d.SetId(d.Get("name").(string))
log.Printf("[INFO] AutoScaling Group ID: %s", d.Id())
2014-07-10 01:00:11 +02:00
if twoPhases {
for _, hook := range generatePutLifecycleHookInputs(asgName, initialLifecycleHooks) {
if err = resourceAwsAutoscalingLifecycleHookPutOp(conn, &hook); err != nil {
return fmt.Errorf("Error creating initial lifecycle hooks: %s", err)
}
}
_, err = conn.UpdateAutoScalingGroup(&updateOpts)
if err != nil {
return fmt.Errorf("Error setting AutoScaling Group initial capacity: %s", err)
}
}
if err := waitForASGCapacity(d, meta, capacitySatisfiedCreate); err != nil {
return err
}
if _, ok := d.GetOk("enabled_metrics"); ok {
metricsErr := enableASGMetricsCollection(d, conn)
if metricsErr != nil {
return metricsErr
}
}
return resourceAwsAutoscalingGroupRead(d, meta)
2014-07-10 01:00:11 +02:00
}
func resourceAwsAutoscalingGroupRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).autoscalingconn
g, err := getAwsAutoscalingGroup(d.Id(), conn)
if err != nil {
return err
}
if g == nil {
log.Printf("[INFO] Autoscaling Group %q not found", d.Id())
d.SetId("")
return nil
}
2016-01-23 08:20:03 +01:00
d.Set("availability_zones", flattenStringList(g.AvailabilityZones))
d.Set("default_cooldown", g.DefaultCooldown)
d.Set("arn", g.AutoScalingGroupARN)
d.Set("desired_capacity", g.DesiredCapacity)
d.Set("health_check_grace_period", g.HealthCheckGracePeriod)
d.Set("health_check_type", g.HealthCheckType)
d.Set("launch_configuration", g.LaunchConfigurationName)
2016-01-23 08:21:59 +01:00
d.Set("load_balancers", flattenStringList(g.LoadBalancerNames))
if err := d.Set("target_group_arns", flattenStringList(g.TargetGroupARNs)); err != nil {
log.Printf("[ERR] Error setting target groups: %s", err)
}
d.Set("min_size", g.MinSize)
d.Set("max_size", g.MaxSize)
2015-11-02 16:33:46 +01:00
d.Set("placement_group", g.PlacementGroup)
d.Set("name", g.AutoScalingGroupName)
d.Set("tag", autoscalingTagDescriptionsToSlice(g.Tags))
2015-02-20 15:55:54 +01:00
d.Set("vpc_zone_identifier", strings.Split(*g.VPCZoneIdentifier, ","))
d.Set("protect_from_scale_in", g.NewInstancesProtectedFromScaleIn)
2016-01-23 08:33:20 +01:00
// If no termination polices are explicitly configured and the upstream state
// is only using the "Default" policy, clear the state to make it consistent
// with the default AWS create API behavior.
_, ok := d.GetOk("termination_policies")
if !ok && len(g.TerminationPolicies) == 1 && *g.TerminationPolicies[0] == "Default" {
d.Set("termination_policies", []interface{}{})
} else {
d.Set("termination_policies", flattenStringList(g.TerminationPolicies))
}
if g.EnabledMetrics != nil {
if err := d.Set("enabled_metrics", flattenAsgEnabledMetrics(g.EnabledMetrics)); err != nil {
log.Printf("[WARN] Error setting metrics for (%s): %s", d.Id(), err)
}
d.Set("metrics_granularity", g.EnabledMetrics[0].Granularity)
}
return nil
}
func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).autoscalingconn
shouldWaitForCapacity := false
2014-07-10 01:00:11 +02:00
2015-04-15 22:30:35 +02:00
opts := autoscaling.UpdateAutoScalingGroupInput{
2015-02-20 15:55:54 +01:00
AutoScalingGroupName: aws.String(d.Id()),
}
opts.NewInstancesProtectedFromScaleIn = aws.Bool(d.Get("protect_from_scale_in").(bool))
if d.HasChange("default_cooldown") {
opts.DefaultCooldown = aws.Int64(int64(d.Get("default_cooldown").(int)))
}
if d.HasChange("desired_capacity") {
opts.DesiredCapacity = aws.Int64(int64(d.Get("desired_capacity").(int)))
shouldWaitForCapacity = true
}
if d.HasChange("launch_configuration") {
2015-02-20 15:55:54 +01:00
opts.LaunchConfigurationName = aws.String(d.Get("launch_configuration").(string))
}
if d.HasChange("min_size") {
opts.MinSize = aws.Int64(int64(d.Get("min_size").(int)))
shouldWaitForCapacity = true
}
if d.HasChange("max_size") {
opts.MaxSize = aws.Int64(int64(d.Get("max_size").(int)))
}
if d.HasChange("health_check_grace_period") {
opts.HealthCheckGracePeriod = aws.Int64(int64(d.Get("health_check_grace_period").(int)))
}
2015-05-30 11:51:56 +02:00
if d.HasChange("health_check_type") {
opts.HealthCheckGracePeriod = aws.Int64(int64(d.Get("health_check_grace_period").(int)))
2015-05-30 11:51:56 +02:00
opts.HealthCheckType = aws.String(d.Get("health_check_type").(string))
}
if d.HasChange("vpc_zone_identifier") {
opts.VPCZoneIdentifier = expandVpcZoneIdentifiers(d.Get("vpc_zone_identifier").(*schema.Set).List())
}
if d.HasChange("availability_zones") {
if v, ok := d.GetOk("availability_zones"); ok && v.(*schema.Set).Len() > 0 {
2016-01-23 08:45:58 +01:00
opts.AvailabilityZones = expandStringList(v.(*schema.Set).List())
}
}
2015-11-02 16:33:46 +01:00
if d.HasChange("placement_group") {
opts.PlacementGroup = aws.String(d.Get("placement_group").(string))
}
if d.HasChange("termination_policies") {
// If the termination policy is set to null, we need to explicitly set
// it back to "Default", or the API won't reset it for us.
if v, ok := d.GetOk("termination_policies"); ok && len(v.([]interface{})) > 0 {
opts.TerminationPolicies = expandStringList(v.([]interface{}))
} else {
2016-09-12 08:14:24 +02:00
log.Printf("[DEBUG] Explicitly setting null termination policy to 'Default'")
opts.TerminationPolicies = aws.StringSlice([]string{"Default"})
}
}
if err := setAutoscalingTags(conn, d); err != nil {
2015-03-03 23:36:25 +01:00
return err
} else {
d.SetPartial("tag")
}
log.Printf("[DEBUG] AutoScaling Group update configuration: %#v", opts)
_, err := conn.UpdateAutoScalingGroup(&opts)
if err != nil {
d.Partial(true)
return fmt.Errorf("Error updating Autoscaling group: %s", err)
}
if d.HasChange("load_balancers") {
o, n := d.GetChange("load_balancers")
if o == nil {
o = new(schema.Set)
}
if n == nil {
n = new(schema.Set)
}
os := o.(*schema.Set)
ns := n.(*schema.Set)
remove := expandStringList(os.Difference(ns).List())
add := expandStringList(ns.Difference(os).List())
if len(remove) > 0 {
_, err := conn.DetachLoadBalancers(&autoscaling.DetachLoadBalancersInput{
AutoScalingGroupName: aws.String(d.Id()),
LoadBalancerNames: remove,
})
if err != nil {
return fmt.Errorf("[WARN] Error updating Load Balancers for AutoScaling Group (%s), error: %s", d.Id(), err)
}
}
if len(add) > 0 {
_, err := conn.AttachLoadBalancers(&autoscaling.AttachLoadBalancersInput{
AutoScalingGroupName: aws.String(d.Id()),
LoadBalancerNames: add,
})
if err != nil {
return fmt.Errorf("[WARN] Error updating Load Balancers for AutoScaling Group (%s), error: %s", d.Id(), err)
}
}
}
if d.HasChange("target_group_arns") {
o, n := d.GetChange("target_group_arns")
if o == nil {
o = new(schema.Set)
}
if n == nil {
n = new(schema.Set)
}
os := o.(*schema.Set)
ns := n.(*schema.Set)
remove := expandStringList(os.Difference(ns).List())
add := expandStringList(ns.Difference(os).List())
if len(remove) > 0 {
_, err := conn.DetachLoadBalancerTargetGroups(&autoscaling.DetachLoadBalancerTargetGroupsInput{
AutoScalingGroupName: aws.String(d.Id()),
TargetGroupARNs: remove,
})
if err != nil {
return fmt.Errorf("[WARN] Error updating Load Balancers Target Groups for AutoScaling Group (%s), error: %s", d.Id(), err)
}
}
if len(add) > 0 {
_, err := conn.AttachLoadBalancerTargetGroups(&autoscaling.AttachLoadBalancerTargetGroupsInput{
AutoScalingGroupName: aws.String(d.Id()),
TargetGroupARNs: add,
})
if err != nil {
return fmt.Errorf("[WARN] Error updating Load Balancers Target Groups for AutoScaling Group (%s), error: %s", d.Id(), err)
}
}
}
if shouldWaitForCapacity {
if err := waitForASGCapacity(d, meta, capacitySatisfiedUpdate); err != nil {
return errwrap.Wrapf("Error waiting for AutoScaling Group Capacity: {{err}}", err)
}
}
if d.HasChange("enabled_metrics") {
if err := updateASGMetricsCollection(d, conn); err != nil {
return errwrap.Wrapf("Error updating AutoScaling Group Metrics collection: {{err}}", err)
}
}
return resourceAwsAutoscalingGroupRead(d, meta)
2014-07-10 01:00:11 +02:00
}
func resourceAwsAutoscalingGroupDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).autoscalingconn
2014-07-10 01:00:11 +02:00
// Read the autoscaling group first. If it doesn't exist, we're done.
// We need the group in order to check if there are instances attached.
// If so, we need to remove those first.
g, err := getAwsAutoscalingGroup(d.Id(), conn)
if err != nil {
return err
}
if g == nil {
log.Printf("[INFO] Autoscaling Group %q not found", d.Id())
d.SetId("")
return nil
}
2015-02-20 15:55:54 +01:00
if len(g.Instances) > 0 || *g.DesiredCapacity > 0 {
if err := resourceAwsAutoscalingGroupDrain(d, meta); err != nil {
return err
}
}
log.Printf("[DEBUG] AutoScaling Group destroy: %v", d.Id())
deleteopts := autoscaling.DeleteAutoScalingGroupInput{
AutoScalingGroupName: aws.String(d.Id()),
ForceDelete: aws.Bool(d.Get("force_delete").(bool)),
2014-07-14 17:36:25 +02:00
}
// We retry the delete operation to handle InUse/InProgress errors coming
// from scaling operations. We should be able to sneak in a delete in between
// scaling operations within 5m.
err = resource.Retry(5*time.Minute, func() *resource.RetryError {
if _, err := conn.DeleteAutoScalingGroup(&deleteopts); err != nil {
if awserr, ok := err.(awserr.Error); ok {
switch awserr.Code() {
case "InvalidGroup.NotFound":
// Already gone? Sure!
return nil
case "ResourceInUse", "ScalingActivityInProgress":
// These are retryable
return resource.RetryableError(awserr)
}
}
// Didn't recognize the error, so shouldn't retry.
return resource.NonRetryableError(err)
}
// Successful delete
return nil
})
if err != nil {
return err
}
2014-07-10 01:00:11 +02:00
return resource.Retry(5*time.Minute, func() *resource.RetryError {
if g, _ = getAwsAutoscalingGroup(d.Id(), conn); g != nil {
return resource.RetryableError(
fmt.Errorf("Auto Scaling Group still exists"))
}
return nil
})
2014-07-10 01:00:11 +02:00
}
func getAwsAutoscalingGroup(
asgName string,
conn *autoscaling.AutoScaling) (*autoscaling.Group, error) {
2014-07-10 01:00:11 +02:00
2015-04-15 22:30:35 +02:00
describeOpts := autoscaling.DescribeAutoScalingGroupsInput{
AutoScalingGroupNames: []*string{aws.String(asgName)},
2014-07-10 01:00:11 +02:00
}
2014-07-14 17:36:25 +02:00
log.Printf("[DEBUG] AutoScaling Group describe configuration: %#v", describeOpts)
describeGroups, err := conn.DescribeAutoScalingGroups(&describeOpts)
2014-07-10 01:00:11 +02:00
if err != nil {
autoscalingerr, ok := err.(awserr.Error)
if ok && autoscalingerr.Code() == "InvalidGroup.NotFound" {
return nil, nil
}
return nil, fmt.Errorf("Error retrieving AutoScaling groups: %s", err)
2014-07-10 01:00:11 +02:00
}
// Search for the autoscaling group
for idx, asc := range describeGroups.AutoScalingGroups {
if *asc.AutoScalingGroupName == asgName {
2015-04-15 22:30:35 +02:00
return describeGroups.AutoScalingGroups[idx], nil
2014-07-10 01:00:11 +02:00
}
}
return nil, nil
}
func resourceAwsAutoscalingGroupDrain(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).autoscalingconn
if d.Get("force_delete").(bool) {
log.Printf("[DEBUG] Skipping ASG drain, force_delete was set.")
return nil
}
// First, set the capacity to zero so the group will drain
log.Printf("[DEBUG] Reducing autoscaling group capacity to zero")
2015-04-15 22:30:35 +02:00
opts := autoscaling.UpdateAutoScalingGroupInput{
AutoScalingGroupName: aws.String(d.Id()),
DesiredCapacity: aws.Int64(0),
MinSize: aws.Int64(0),
MaxSize: aws.Int64(0),
}
if _, err := conn.UpdateAutoScalingGroup(&opts); err != nil {
return fmt.Errorf("Error setting capacity to zero to drain: %s", err)
}
// Next, wait for the autoscale group to drain
log.Printf("[DEBUG] Waiting for group to have zero instances")
return resource.Retry(10*time.Minute, func() *resource.RetryError {
g, err := getAwsAutoscalingGroup(d.Id(), conn)
if err != nil {
return resource.NonRetryableError(err)
}
if g == nil {
log.Printf("[INFO] Autoscaling Group %q not found", d.Id())
d.SetId("")
return nil
}
if len(g.Instances) == 0 {
return nil
}
return resource.RetryableError(
fmt.Errorf("group still has %d instances", len(g.Instances)))
})
}
// enableASGMetricsCollection turns on CloudWatch metrics collection for the
// ASG, using the configured granularity and the configured set of metrics.
func enableASGMetricsCollection(d *schema.ResourceData, conn *autoscaling.AutoScaling) error {
	input := &autoscaling.EnableMetricsCollectionInput{
		AutoScalingGroupName: aws.String(d.Id()),
		Granularity:          aws.String(d.Get("metrics_granularity").(string)),
		Metrics:              expandStringList(d.Get("enabled_metrics").(*schema.Set).List()),
	}

	log.Printf("[INFO] Enabling metrics collection for the ASG: %s", d.Id())
	if _, err := conn.EnableMetricsCollection(input); err != nil {
		return err
	}
	return nil
}
func updateASGMetricsCollection(d *schema.ResourceData, conn *autoscaling.AutoScaling) error {
o, n := d.GetChange("enabled_metrics")
if o == nil {
o = new(schema.Set)
}
if n == nil {
n = new(schema.Set)
}
os := o.(*schema.Set)
ns := n.(*schema.Set)
disableMetrics := os.Difference(ns)
if disableMetrics.Len() != 0 {
props := &autoscaling.DisableMetricsCollectionInput{
AutoScalingGroupName: aws.String(d.Id()),
Metrics: expandStringList(disableMetrics.List()),
}
_, err := conn.DisableMetricsCollection(props)
if err != nil {
return fmt.Errorf("Failure to Disable metrics collection types for ASG %s: %s", d.Id(), err)
}
}
enabledMetrics := ns.Difference(os)
if enabledMetrics.Len() != 0 {
props := &autoscaling.EnableMetricsCollectionInput{
AutoScalingGroupName: aws.String(d.Id()),
Metrics: expandStringList(enabledMetrics.List()),
provider/aws: Fix bug with Updating `aws_autoscaling_group` (#7698) `enabled_metrics` Fixes #7693 The metrics_granularity parameter was not being passed to the `EnableMetricsCollection` when we were calling it from the Update func. this was causing the API call to silently fail and not update the metrics for collection - unfortunately the enabled_metrics were still being added to the state :( By passing the granularity, we now get the correct metrics for collection ``` % make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSAutoScalingGroup_' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... | grep -v /terraform/vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSAutoScalingGroup_ -timeout 120m === RUN TestAccAWSAutoScalingGroup_importBasic --- PASS: TestAccAWSAutoScalingGroup_importBasic (166.86s) === RUN TestAccAWSAutoScalingGroup_basic --- PASS: TestAccAWSAutoScalingGroup_basic (240.23s) === RUN TestAccAWSAutoScalingGroup_autoGeneratedName --- PASS: TestAccAWSAutoScalingGroup_autoGeneratedName (50.29s) === RUN TestAccAWSAutoScalingGroup_terminationPolicies --- PASS: TestAccAWSAutoScalingGroup_terminationPolicies (79.93s) === RUN TestAccAWSAutoScalingGroup_tags --- PASS: TestAccAWSAutoScalingGroup_tags (270.79s) === RUN TestAccAWSAutoScalingGroup_VpcUpdates --- PASS: TestAccAWSAutoScalingGroup_VpcUpdates (77.76s) === RUN TestAccAWSAutoScalingGroup_WithLoadBalancer --- PASS: TestAccAWSAutoScalingGroup_WithLoadBalancer (400.67s) === RUN TestAccAWSAutoScalingGroup_withPlacementGroup --- PASS: TestAccAWSAutoScalingGroup_withPlacementGroup (134.39s) === RUN TestAccAWSAutoScalingGroup_enablingMetrics --- PASS: TestAccAWSAutoScalingGroup_enablingMetrics (305.32s) === RUN TestAccAWSAutoScalingGroup_withMetrics --- PASS: TestAccAWSAutoScalingGroup_withMetrics (48.56s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 1774.819s ```
2016-07-20 20:36:45 +02:00
Granularity: aws.String(d.Get("metrics_granularity").(string)),
}
_, err := conn.EnableMetricsCollection(props)
if err != nil {
return fmt.Errorf("Failure to Enable metrics collection types for ASG %s: %s", d.Id(), err)
}
}
return nil
}
// Returns a mapping of the instance states of all the ELBs attached to the
// provided ASG.
//
// Nested like: lbName -> instanceId -> instanceState
func getLBInstanceStates(g *autoscaling.Group, meta interface{}) (map[string]map[string]string, error) {
	elbconn := meta.(*AWSClient).elbconn

	states := make(map[string]map[string]string)
	for _, name := range g.LoadBalancerNames {
		health, err := elbconn.DescribeInstanceHealth(
			&elb.DescribeInstanceHealthInput{LoadBalancerName: name})
		if err != nil {
			return nil, err
		}

		// Every attached LB gets an entry, even when it reports no
		// instances; entries with a missing id or state are skipped.
		perInstance := make(map[string]string)
		for _, st := range health.InstanceStates {
			if st.InstanceId != nil && st.State != nil {
				perInstance[*st.InstanceId] = *st.State
			}
		}
		states[*name] = perInstance
	}

	return states, nil
}
// expandVpcZoneIdentifiers joins the configured subnet IDs into the single
// comma-separated string the AutoScaling API expects for VPCZoneIdentifier.
func expandVpcZoneIdentifiers(list []interface{}) *string {
	// Build from length zero with pre-sized capacity. The previous
	// make([]string, len(list)) combined with append produced len(list)
	// leading empty strings, yielding identifiers like ",,subnet-a".
	strs := make([]string, 0, len(list))
	for _, s := range list {
		strs = append(strs, s.(string))
	}
	return aws.String(strings.Join(strs, ","))
}