2014-07-10 01:00:11 +02:00
|
|
|
package aws
|
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
|
|
|
"log"
|
2014-12-12 23:21:20 +01:00
|
|
|
"strings"
|
2014-10-18 05:10:52 +02:00
|
|
|
"time"
|
2014-07-10 01:00:11 +02:00
|
|
|
|
2016-09-02 16:24:17 +02:00
|
|
|
"github.com/hashicorp/errwrap"
|
2014-10-18 05:10:52 +02:00
|
|
|
"github.com/hashicorp/terraform/helper/resource"
|
2014-10-10 23:34:40 +02:00
|
|
|
"github.com/hashicorp/terraform/helper/schema"
|
2015-02-20 15:55:54 +01:00
|
|
|
|
2015-06-03 20:36:57 +02:00
|
|
|
"github.com/aws/aws-sdk-go/aws"
|
|
|
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
|
|
|
"github.com/aws/aws-sdk-go/service/autoscaling"
|
|
|
|
"github.com/aws/aws-sdk-go/service/elb"
|
2014-07-10 01:00:11 +02:00
|
|
|
)
|
|
|
|
|
2014-10-10 23:34:40 +02:00
|
|
|
func resourceAwsAutoscalingGroup() *schema.Resource {
|
|
|
|
return &schema.Resource{
|
|
|
|
Create: resourceAwsAutoscalingGroupCreate,
|
|
|
|
Read: resourceAwsAutoscalingGroupRead,
|
|
|
|
Update: resourceAwsAutoscalingGroupUpdate,
|
|
|
|
Delete: resourceAwsAutoscalingGroupDelete,
|
2016-05-13 20:39:08 +02:00
|
|
|
Importer: &schema.ResourceImporter{
|
|
|
|
State: schema.ImportStatePassthrough,
|
|
|
|
},
|
2014-10-10 23:34:40 +02:00
|
|
|
|
|
|
|
Schema: map[string]*schema.Schema{
|
|
|
|
"name": &schema.Schema{
|
|
|
|
Type: schema.TypeString,
|
2015-10-31 10:24:46 +01:00
|
|
|
Optional: true,
|
|
|
|
Computed: true,
|
2014-10-10 23:34:40 +02:00
|
|
|
ForceNew: true,
|
2015-06-25 16:01:40 +02:00
|
|
|
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
|
|
|
|
// https://github.com/boto/botocore/blob/9f322b1/botocore/data/autoscaling/2011-01-01/service-2.json#L1862-L1873
|
|
|
|
value := v.(string)
|
|
|
|
if len(value) > 255 {
|
|
|
|
errors = append(errors, fmt.Errorf(
|
|
|
|
"%q cannot be longer than 255 characters", k))
|
|
|
|
}
|
|
|
|
return
|
|
|
|
},
|
2014-10-10 23:34:40 +02:00
|
|
|
},
|
|
|
|
|
|
|
|
"launch_configuration": &schema.Schema{
|
|
|
|
Type: schema.TypeString,
|
|
|
|
Required: true,
|
|
|
|
},
|
|
|
|
|
|
|
|
"desired_capacity": &schema.Schema{
|
|
|
|
Type: schema.TypeInt,
|
|
|
|
Optional: true,
|
|
|
|
Computed: true,
|
|
|
|
},
|
|
|
|
|
2015-05-14 19:45:21 +02:00
|
|
|
"min_elb_capacity": &schema.Schema{
|
2016-01-27 16:55:10 +01:00
|
|
|
Type: schema.TypeInt,
|
|
|
|
Optional: true,
|
2015-05-14 19:45:21 +02:00
|
|
|
},
|
|
|
|
|
2014-10-10 23:34:40 +02:00
|
|
|
"min_size": &schema.Schema{
|
|
|
|
Type: schema.TypeInt,
|
|
|
|
Required: true,
|
|
|
|
},
|
|
|
|
|
|
|
|
"max_size": &schema.Schema{
|
|
|
|
Type: schema.TypeInt,
|
|
|
|
Required: true,
|
|
|
|
},
|
|
|
|
|
|
|
|
"default_cooldown": &schema.Schema{
|
|
|
|
Type: schema.TypeInt,
|
|
|
|
Optional: true,
|
|
|
|
Computed: true,
|
|
|
|
},
|
|
|
|
|
|
|
|
"force_delete": &schema.Schema{
|
|
|
|
Type: schema.TypeBool,
|
|
|
|
Optional: true,
|
2015-10-12 22:50:07 +02:00
|
|
|
Default: false,
|
2014-10-10 23:34:40 +02:00
|
|
|
},
|
|
|
|
|
|
|
|
"health_check_grace_period": &schema.Schema{
|
|
|
|
Type: schema.TypeInt,
|
|
|
|
Optional: true,
|
2016-03-24 00:55:37 +01:00
|
|
|
Default: 300,
|
2014-10-10 23:34:40 +02:00
|
|
|
},
|
|
|
|
|
|
|
|
"health_check_type": &schema.Schema{
|
|
|
|
Type: schema.TypeString,
|
|
|
|
Optional: true,
|
|
|
|
Computed: true,
|
|
|
|
},
|
|
|
|
|
|
|
|
"availability_zones": &schema.Schema{
|
|
|
|
Type: schema.TypeSet,
|
2015-07-14 17:19:10 +02:00
|
|
|
Optional: true,
|
2016-02-01 02:44:19 +01:00
|
|
|
Computed: true,
|
2014-10-10 23:34:40 +02:00
|
|
|
Elem: &schema.Schema{Type: schema.TypeString},
|
2015-04-09 15:38:16 +02:00
|
|
|
Set: schema.HashString,
|
2014-10-10 23:34:40 +02:00
|
|
|
},
|
|
|
|
|
2015-10-30 22:45:19 +01:00
|
|
|
"placement_group": &schema.Schema{
|
2015-11-02 16:26:25 +01:00
|
|
|
Type: schema.TypeString,
|
2015-10-30 22:45:19 +01:00
|
|
|
Optional: true,
|
|
|
|
},
|
|
|
|
|
2014-10-10 23:34:40 +02:00
|
|
|
"load_balancers": &schema.Schema{
|
|
|
|
Type: schema.TypeSet,
|
|
|
|
Optional: true,
|
|
|
|
Elem: &schema.Schema{Type: schema.TypeString},
|
2015-04-09 15:38:16 +02:00
|
|
|
Set: schema.HashString,
|
2014-10-10 23:34:40 +02:00
|
|
|
},
|
|
|
|
|
|
|
|
"vpc_zone_identifier": &schema.Schema{
|
|
|
|
Type: schema.TypeSet,
|
|
|
|
Optional: true,
|
|
|
|
Computed: true,
|
|
|
|
Elem: &schema.Schema{Type: schema.TypeString},
|
2015-04-09 15:38:16 +02:00
|
|
|
Set: schema.HashString,
|
2014-10-10 23:34:40 +02:00
|
|
|
},
|
2014-10-23 23:58:54 +02:00
|
|
|
|
|
|
|
"termination_policies": &schema.Schema{
|
2015-07-29 23:44:02 +02:00
|
|
|
Type: schema.TypeList,
|
2014-10-23 23:58:54 +02:00
|
|
|
Optional: true,
|
|
|
|
Elem: &schema.Schema{Type: schema.TypeString},
|
|
|
|
},
|
2015-03-03 23:36:25 +01:00
|
|
|
|
2015-09-08 20:15:30 +02:00
|
|
|
"wait_for_capacity_timeout": &schema.Schema{
|
|
|
|
Type: schema.TypeString,
|
|
|
|
Optional: true,
|
|
|
|
Default: "10m",
|
|
|
|
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
|
|
|
|
value := v.(string)
|
|
|
|
duration, err := time.ParseDuration(value)
|
|
|
|
if err != nil {
|
|
|
|
errors = append(errors, fmt.Errorf(
|
|
|
|
"%q cannot be parsed as a duration: %s", k, err))
|
|
|
|
}
|
|
|
|
if duration < 0 {
|
|
|
|
errors = append(errors, fmt.Errorf(
|
|
|
|
"%q must be greater than zero", k))
|
|
|
|
}
|
|
|
|
return
|
|
|
|
},
|
|
|
|
},
|
|
|
|
|
2015-11-17 01:16:22 +01:00
|
|
|
"wait_for_elb_capacity": &schema.Schema{
|
|
|
|
Type: schema.TypeInt,
|
|
|
|
Optional: true,
|
|
|
|
},
|
|
|
|
|
2016-01-15 11:29:15 +01:00
|
|
|
"enabled_metrics": &schema.Schema{
|
|
|
|
Type: schema.TypeSet,
|
2016-02-29 21:58:41 +01:00
|
|
|
Optional: true,
|
2016-01-15 11:29:15 +01:00
|
|
|
Elem: &schema.Schema{Type: schema.TypeString},
|
|
|
|
Set: schema.HashString,
|
|
|
|
},
|
|
|
|
|
2016-02-29 21:58:41 +01:00
|
|
|
"metrics_granularity": &schema.Schema{
|
|
|
|
Type: schema.TypeString,
|
|
|
|
Optional: true,
|
|
|
|
Default: "1Minute",
|
|
|
|
},
|
|
|
|
|
2016-06-29 08:36:34 +02:00
|
|
|
"protect_from_scale_in": &schema.Schema{
|
|
|
|
Type: schema.TypeBool,
|
|
|
|
Optional: true,
|
|
|
|
Default: false,
|
|
|
|
},
|
|
|
|
|
2016-08-19 21:07:53 +02:00
|
|
|
"target_group_arns": &schema.Schema{
|
|
|
|
Type: schema.TypeSet,
|
|
|
|
Optional: true,
|
|
|
|
Elem: &schema.Schema{Type: schema.TypeString},
|
|
|
|
Set: schema.HashString,
|
|
|
|
},
|
|
|
|
|
2016-08-27 16:20:11 +02:00
|
|
|
"arn": &schema.Schema{
|
|
|
|
Type: schema.TypeString,
|
|
|
|
Computed: true,
|
|
|
|
},
|
|
|
|
|
2016-03-14 14:13:12 +01:00
|
|
|
"initial_lifecycle_hook": &schema.Schema{
|
|
|
|
Type: schema.TypeSet,
|
|
|
|
Optional: true,
|
|
|
|
Elem: &schema.Resource{
|
|
|
|
Schema: map[string]*schema.Schema{
|
|
|
|
"name": {
|
|
|
|
Type: schema.TypeString,
|
|
|
|
Required: true,
|
|
|
|
},
|
|
|
|
"default_result": {
|
|
|
|
Type: schema.TypeString,
|
|
|
|
Optional: true,
|
|
|
|
Computed: true,
|
|
|
|
},
|
|
|
|
"heartbeat_timeout": {
|
|
|
|
Type: schema.TypeInt,
|
|
|
|
Optional: true,
|
|
|
|
},
|
|
|
|
"lifecycle_transition": {
|
|
|
|
Type: schema.TypeString,
|
|
|
|
Required: true,
|
|
|
|
},
|
|
|
|
"notification_metadata": {
|
|
|
|
Type: schema.TypeString,
|
|
|
|
Optional: true,
|
|
|
|
},
|
|
|
|
"notification_target_arn": {
|
|
|
|
Type: schema.TypeString,
|
|
|
|
Optional: true,
|
|
|
|
},
|
|
|
|
"role_arn": {
|
|
|
|
Type: schema.TypeString,
|
|
|
|
Optional: true,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
|
2015-03-03 23:36:25 +01:00
|
|
|
"tag": autoscalingTagsSchema(),
|
2014-10-10 23:34:40 +02:00
|
|
|
},
|
2014-07-10 01:00:11 +02:00
|
|
|
}
|
2014-10-10 23:34:40 +02:00
|
|
|
}
|
2014-07-10 01:00:11 +02:00
|
|
|
|
2016-03-14 14:13:12 +01:00
|
|
|
func generatePutLifecycleHookInputs(asgName string, cfgs []interface{}) []autoscaling.PutLifecycleHookInput {
|
|
|
|
res := make([]autoscaling.PutLifecycleHookInput, 0, len(cfgs))
|
|
|
|
|
|
|
|
for _, raw := range cfgs {
|
|
|
|
cfg := raw.(map[string]interface{})
|
|
|
|
|
|
|
|
input := autoscaling.PutLifecycleHookInput{
|
|
|
|
AutoScalingGroupName: &asgName,
|
|
|
|
LifecycleHookName: aws.String(cfg["name"].(string)),
|
|
|
|
}
|
|
|
|
|
|
|
|
if v, ok := cfg["default_result"]; ok && v.(string) != "" {
|
|
|
|
input.DefaultResult = aws.String(v.(string))
|
|
|
|
}
|
|
|
|
|
|
|
|
if v, ok := cfg["heartbeat_timeout"]; ok && v.(int) > 0 {
|
|
|
|
input.HeartbeatTimeout = aws.Int64(int64(v.(int)))
|
|
|
|
}
|
|
|
|
|
|
|
|
if v, ok := cfg["lifecycle_transition"]; ok && v.(string) != "" {
|
|
|
|
input.LifecycleTransition = aws.String(v.(string))
|
|
|
|
}
|
|
|
|
|
|
|
|
if v, ok := cfg["notification_metadata"]; ok && v.(string) != "" {
|
|
|
|
input.NotificationMetadata = aws.String(v.(string))
|
|
|
|
}
|
|
|
|
|
|
|
|
if v, ok := cfg["notification_target_arn"]; ok && v.(string) != "" {
|
|
|
|
input.NotificationTargetARN = aws.String(v.(string))
|
|
|
|
}
|
|
|
|
|
|
|
|
if v, ok := cfg["role_arn"]; ok && v.(string) != "" {
|
|
|
|
input.RoleARN = aws.String(v.(string))
|
|
|
|
}
|
|
|
|
|
|
|
|
res = append(res, input)
|
|
|
|
}
|
|
|
|
|
|
|
|
return res
|
|
|
|
}
|
|
|
|
|
2014-10-10 23:34:40 +02:00
|
|
|
// resourceAwsAutoscalingGroupCreate creates the AutoScaling group. When
// initial lifecycle hooks are configured it uses a two-phase create: the
// group is first created with min/max size 0, the hooks are installed, and
// only then is the real capacity applied — so the hooks fire for the very
// first instances. It finishes by waiting for capacity, optionally enabling
// metrics collection, and delegating to Read to populate state.
func resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).autoscalingconn

	// Use the configured name, or generate a unique one and persist it so
	// the rest of this function (and Read) can rely on d.Get("name").
	var asgName string
	if v, ok := d.GetOk("name"); ok {
		asgName = v.(string)
	} else {
		asgName = resource.PrefixedUniqueId("tf-asg-")
		d.Set("name", asgName)
	}

	createOpts := autoscaling.CreateAutoScalingGroupInput{
		AutoScalingGroupName:             aws.String(asgName),
		LaunchConfigurationName:          aws.String(d.Get("launch_configuration").(string)),
		NewInstancesProtectedFromScaleIn: aws.Bool(d.Get("protect_from_scale_in").(bool)),
	}
	// Used only by the second phase (capacity bump) when hooks are present.
	updateOpts := autoscaling.UpdateAutoScalingGroupInput{
		AutoScalingGroupName: aws.String(asgName),
	}

	initialLifecycleHooks := d.Get("initial_lifecycle_hook").(*schema.Set).List()
	twoPhases := len(initialLifecycleHooks) > 0

	minSize := aws.Int64(int64(d.Get("min_size").(int)))
	maxSize := aws.Int64(int64(d.Get("max_size").(int)))

	if twoPhases {
		// Phase one: create at zero capacity so no instances launch before
		// the lifecycle hooks exist. Real sizes go on updateOpts instead.
		createOpts.MinSize = aws.Int64(int64(0))
		createOpts.MaxSize = aws.Int64(int64(0))

		updateOpts.MinSize = minSize
		updateOpts.MaxSize = maxSize

		if v, ok := d.GetOk("desired_capacity"); ok {
			updateOpts.DesiredCapacity = aws.Int64(int64(v.(int)))
		}
	} else {
		// Single-phase create: apply capacity immediately.
		createOpts.MinSize = minSize
		createOpts.MaxSize = maxSize

		if v, ok := d.GetOk("desired_capacity"); ok {
			createOpts.DesiredCapacity = aws.Int64(int64(v.(int)))
		}
	}

	// Availability Zones are optional if VPC Zone Identifer(s) are specified
	if v, ok := d.GetOk("availability_zones"); ok && v.(*schema.Set).Len() > 0 {
		createOpts.AvailabilityZones = expandStringList(v.(*schema.Set).List())
	}

	if v, ok := d.GetOk("tag"); ok {
		createOpts.Tags = autoscalingTagsFromMap(
			setToMapByKey(v.(*schema.Set), "key"), d.Get("name").(string))
	}

	if v, ok := d.GetOk("default_cooldown"); ok {
		createOpts.DefaultCooldown = aws.Int64(int64(v.(int)))
	}

	if v, ok := d.GetOk("health_check_type"); ok && v.(string) != "" {
		createOpts.HealthCheckType = aws.String(v.(string))
	}

	if v, ok := d.GetOk("health_check_grace_period"); ok {
		createOpts.HealthCheckGracePeriod = aws.Int64(int64(v.(int)))
	}

	if v, ok := d.GetOk("placement_group"); ok {
		createOpts.PlacementGroup = aws.String(v.(string))
	}

	if v, ok := d.GetOk("load_balancers"); ok && v.(*schema.Set).Len() > 0 {
		createOpts.LoadBalancerNames = expandStringList(
			v.(*schema.Set).List())
	}

	if v, ok := d.GetOk("vpc_zone_identifier"); ok && v.(*schema.Set).Len() > 0 {
		createOpts.VPCZoneIdentifier = expandVpcZoneIdentifiers(v.(*schema.Set).List())
	}

	if v, ok := d.GetOk("termination_policies"); ok && len(v.([]interface{})) > 0 {
		createOpts.TerminationPolicies = expandStringList(v.([]interface{}))
	}

	if v, ok := d.GetOk("target_group_arns"); ok && len(v.(*schema.Set).List()) > 0 {
		createOpts.TargetGroupARNs = expandStringList(v.(*schema.Set).List())
	}

	log.Printf("[DEBUG] AutoScaling Group create configuration: %#v", createOpts)
	_, err := conn.CreateAutoScalingGroup(&createOpts)
	if err != nil {
		return fmt.Errorf("Error creating AutoScaling Group: %s", err)
	}

	// The group name is the resource ID.
	d.SetId(d.Get("name").(string))
	log.Printf("[INFO] AutoScaling Group ID: %s", d.Id())

	if twoPhases {
		// Phase two: install each lifecycle hook, then raise the group to
		// its real capacity. &hook is safe here: the call is synchronous,
		// so the loop variable is not retained across iterations.
		for _, hook := range generatePutLifecycleHookInputs(asgName, initialLifecycleHooks) {
			if err = resourceAwsAutoscalingLifecycleHookPutOp(conn, &hook); err != nil {
				return fmt.Errorf("Error creating initial lifecycle hooks: %s", err)
			}
		}

		_, err = conn.UpdateAutoScalingGroup(&updateOpts)
		if err != nil {
			return fmt.Errorf("Error setting AutoScaling Group initial capacity: %s", err)
		}
	}

	// Block until the group reaches its configured capacity.
	if err := waitForASGCapacity(d, meta, capacitySatisfiedCreate); err != nil {
		return err
	}

	if _, ok := d.GetOk("enabled_metrics"); ok {
		metricsErr := enableASGMetricsCollection(d, conn)
		if metricsErr != nil {
			return metricsErr
		}
	}

	return resourceAwsAutoscalingGroupRead(d, meta)
}
|
|
|
|
|
2014-11-21 17:58:34 +01:00
|
|
|
func resourceAwsAutoscalingGroupRead(d *schema.ResourceData, meta interface{}) error {
|
2016-01-28 18:52:35 +01:00
|
|
|
conn := meta.(*AWSClient).autoscalingconn
|
|
|
|
|
|
|
|
g, err := getAwsAutoscalingGroup(d.Id(), conn)
|
2014-11-21 17:58:34 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if g == nil {
|
2016-01-28 18:52:35 +01:00
|
|
|
log.Printf("[INFO] Autoscaling Group %q not found", d.Id())
|
|
|
|
d.SetId("")
|
2014-11-21 17:58:34 +01:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-01-23 08:20:03 +01:00
|
|
|
d.Set("availability_zones", flattenStringList(g.AvailabilityZones))
|
2015-03-26 20:49:15 +01:00
|
|
|
d.Set("default_cooldown", g.DefaultCooldown)
|
2016-08-27 16:20:11 +02:00
|
|
|
d.Set("arn", g.AutoScalingGroupARN)
|
2015-03-26 20:49:15 +01:00
|
|
|
d.Set("desired_capacity", g.DesiredCapacity)
|
|
|
|
d.Set("health_check_grace_period", g.HealthCheckGracePeriod)
|
|
|
|
d.Set("health_check_type", g.HealthCheckType)
|
|
|
|
d.Set("launch_configuration", g.LaunchConfigurationName)
|
2016-01-23 08:21:59 +01:00
|
|
|
d.Set("load_balancers", flattenStringList(g.LoadBalancerNames))
|
2016-08-19 21:07:53 +02:00
|
|
|
if err := d.Set("target_group_arns", flattenStringList(g.TargetGroupARNs)); err != nil {
|
|
|
|
log.Printf("[ERR] Error setting target groups: %s", err)
|
|
|
|
}
|
2015-03-26 20:49:15 +01:00
|
|
|
d.Set("min_size", g.MinSize)
|
|
|
|
d.Set("max_size", g.MaxSize)
|
2015-11-02 16:33:46 +01:00
|
|
|
d.Set("placement_group", g.PlacementGroup)
|
2015-03-26 20:49:15 +01:00
|
|
|
d.Set("name", g.AutoScalingGroupName)
|
2016-04-22 06:36:14 +02:00
|
|
|
d.Set("tag", autoscalingTagDescriptionsToSlice(g.Tags))
|
2015-02-20 15:55:54 +01:00
|
|
|
d.Set("vpc_zone_identifier", strings.Split(*g.VPCZoneIdentifier, ","))
|
2016-06-29 08:36:34 +02:00
|
|
|
d.Set("protect_from_scale_in", g.NewInstancesProtectedFromScaleIn)
|
2016-01-23 08:33:20 +01:00
|
|
|
|
|
|
|
// If no termination polices are explicitly configured and the upstream state
|
|
|
|
// is only using the "Default" policy, clear the state to make it consistent
|
|
|
|
// with the default AWS create API behavior.
|
|
|
|
_, ok := d.GetOk("termination_policies")
|
|
|
|
if !ok && len(g.TerminationPolicies) == 1 && *g.TerminationPolicies[0] == "Default" {
|
|
|
|
d.Set("termination_policies", []interface{}{})
|
|
|
|
} else {
|
|
|
|
d.Set("termination_policies", flattenStringList(g.TerminationPolicies))
|
|
|
|
}
|
2014-11-21 17:58:34 +01:00
|
|
|
|
2016-01-15 11:29:15 +01:00
|
|
|
if g.EnabledMetrics != nil {
|
|
|
|
if err := d.Set("enabled_metrics", flattenAsgEnabledMetrics(g.EnabledMetrics)); err != nil {
|
|
|
|
log.Printf("[WARN] Error setting metrics for (%s): %s", d.Id(), err)
|
|
|
|
}
|
2016-02-29 21:58:41 +01:00
|
|
|
d.Set("metrics_granularity", g.EnabledMetrics[0].Granularity)
|
2016-01-15 11:29:15 +01:00
|
|
|
}
|
|
|
|
|
2014-11-21 17:58:34 +01:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2014-10-10 23:34:40 +02:00
|
|
|
// resourceAwsAutoscalingGroupUpdate applies configuration changes: it builds
// a single UpdateAutoScalingGroup call from the changed attributes, syncs
// tags, then reconciles load balancer and target group attachments via
// attach/detach calls, optionally waits for capacity, and updates metrics
// collection. It ends by re-reading state.
func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).autoscalingconn
	// Set when a capacity-affecting attribute changes, so we wait below.
	shouldWaitForCapacity := false

	opts := autoscaling.UpdateAutoScalingGroupInput{
		AutoScalingGroupName: aws.String(d.Id()),
	}

	// Always sent, not just on change.
	opts.NewInstancesProtectedFromScaleIn = aws.Bool(d.Get("protect_from_scale_in").(bool))

	if d.HasChange("default_cooldown") {
		opts.DefaultCooldown = aws.Int64(int64(d.Get("default_cooldown").(int)))
	}

	if d.HasChange("desired_capacity") {
		opts.DesiredCapacity = aws.Int64(int64(d.Get("desired_capacity").(int)))
		shouldWaitForCapacity = true
	}

	if d.HasChange("launch_configuration") {
		opts.LaunchConfigurationName = aws.String(d.Get("launch_configuration").(string))
	}

	if d.HasChange("min_size") {
		opts.MinSize = aws.Int64(int64(d.Get("min_size").(int)))
		shouldWaitForCapacity = true
	}

	if d.HasChange("max_size") {
		opts.MaxSize = aws.Int64(int64(d.Get("max_size").(int)))
	}

	if d.HasChange("health_check_grace_period") {
		opts.HealthCheckGracePeriod = aws.Int64(int64(d.Get("health_check_grace_period").(int)))
	}

	if d.HasChange("health_check_type") {
		// NOTE(review): the grace period is (re-)sent here as well as in the
		// branch above — presumably because the API requires it alongside a
		// health check type change; confirm before simplifying.
		opts.HealthCheckGracePeriod = aws.Int64(int64(d.Get("health_check_grace_period").(int)))
		opts.HealthCheckType = aws.String(d.Get("health_check_type").(string))
	}

	if d.HasChange("vpc_zone_identifier") {
		opts.VPCZoneIdentifier = expandVpcZoneIdentifiers(d.Get("vpc_zone_identifier").(*schema.Set).List())
	}

	if d.HasChange("availability_zones") {
		if v, ok := d.GetOk("availability_zones"); ok && v.(*schema.Set).Len() > 0 {
			opts.AvailabilityZones = expandStringList(v.(*schema.Set).List())
		}
	}

	if d.HasChange("placement_group") {
		opts.PlacementGroup = aws.String(d.Get("placement_group").(string))
	}

	if d.HasChange("termination_policies") {
		// If the termination policy is set to null, we need to explicitly set
		// it back to "Default", or the API won't reset it for us.
		if v, ok := d.GetOk("termination_policies"); ok && len(v.([]interface{})) > 0 {
			opts.TerminationPolicies = expandStringList(v.([]interface{}))
		} else {
			log.Printf("[DEBUG] Explicitly setting null termination policy to 'Default'")
			opts.TerminationPolicies = aws.StringSlice([]string{"Default"})
		}
	}

	// Sync tags first; mark the "tag" attribute handled on success.
	if err := setAutoscalingTags(conn, d); err != nil {
		return err
	} else {
		d.SetPartial("tag")
	}

	log.Printf("[DEBUG] AutoScaling Group update configuration: %#v", opts)
	_, err := conn.UpdateAutoScalingGroup(&opts)
	if err != nil {
		// Enter partial mode so already-applied changes are kept in state.
		d.Partial(true)
		return fmt.Errorf("Error updating Autoscaling group: %s", err)
	}

	if d.HasChange("load_balancers") {

		// Diff old vs. new sets to compute which ELBs to attach/detach.
		o, n := d.GetChange("load_balancers")
		if o == nil {
			o = new(schema.Set)
		}
		if n == nil {
			n = new(schema.Set)
		}

		os := o.(*schema.Set)
		ns := n.(*schema.Set)
		remove := expandStringList(os.Difference(ns).List())
		add := expandStringList(ns.Difference(os).List())

		if len(remove) > 0 {
			_, err := conn.DetachLoadBalancers(&autoscaling.DetachLoadBalancersInput{
				AutoScalingGroupName: aws.String(d.Id()),
				LoadBalancerNames:    remove,
			})
			if err != nil {
				return fmt.Errorf("[WARN] Error updating Load Balancers for AutoScaling Group (%s), error: %s", d.Id(), err)
			}
		}

		if len(add) > 0 {
			_, err := conn.AttachLoadBalancers(&autoscaling.AttachLoadBalancersInput{
				AutoScalingGroupName: aws.String(d.Id()),
				LoadBalancerNames:    add,
			})
			if err != nil {
				return fmt.Errorf("[WARN] Error updating Load Balancers for AutoScaling Group (%s), error: %s", d.Id(), err)
			}
		}
	}

	if d.HasChange("target_group_arns") {

		// Same diff-and-reconcile pattern for ALB/NLB target groups.
		o, n := d.GetChange("target_group_arns")
		if o == nil {
			o = new(schema.Set)
		}
		if n == nil {
			n = new(schema.Set)
		}

		os := o.(*schema.Set)
		ns := n.(*schema.Set)
		remove := expandStringList(os.Difference(ns).List())
		add := expandStringList(ns.Difference(os).List())

		if len(remove) > 0 {
			_, err := conn.DetachLoadBalancerTargetGroups(&autoscaling.DetachLoadBalancerTargetGroupsInput{
				AutoScalingGroupName: aws.String(d.Id()),
				TargetGroupARNs:      remove,
			})
			if err != nil {
				return fmt.Errorf("[WARN] Error updating Load Balancers Target Groups for AutoScaling Group (%s), error: %s", d.Id(), err)
			}
		}

		if len(add) > 0 {
			_, err := conn.AttachLoadBalancerTargetGroups(&autoscaling.AttachLoadBalancerTargetGroupsInput{
				AutoScalingGroupName: aws.String(d.Id()),
				TargetGroupARNs:      add,
			})
			if err != nil {
				return fmt.Errorf("[WARN] Error updating Load Balancers Target Groups for AutoScaling Group (%s), error: %s", d.Id(), err)
			}
		}
	}

	if shouldWaitForCapacity {
		if err := waitForASGCapacity(d, meta, capacitySatisfiedUpdate); err != nil {
			return errwrap.Wrapf("Error waiting for AutoScaling Group Capacity: {{err}}", err)
		}
	}

	if d.HasChange("enabled_metrics") {
		if err := updateASGMetricsCollection(d, conn); err != nil {
			return errwrap.Wrapf("Error updating AutoScaling Group Metrics collection: {{err}}", err)
		}
	}

	return resourceAwsAutoscalingGroupRead(d, meta)
}
|
|
|
|
|
2014-10-10 23:34:40 +02:00
|
|
|
// resourceAwsAutoscalingGroupDelete destroys the AutoScaling group. If the
// group still has (or wants) instances it is drained to zero first, then the
// delete is retried around transient scaling-activity errors, and finally we
// poll until the group is actually gone.
func resourceAwsAutoscalingGroupDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).autoscalingconn

	// Read the autoscaling group first. If it doesn't exist, we're done.
	// We need the group in order to check if there are instances attached.
	// If so, we need to remove those first.
	g, err := getAwsAutoscalingGroup(d.Id(), conn)
	if err != nil {
		return err
	}
	if g == nil {
		log.Printf("[INFO] Autoscaling Group %q not found", d.Id())
		d.SetId("")
		return nil
	}
	// DesiredCapacity > 0 covers the case where instances are still being
	// launched but none are attached yet.
	if len(g.Instances) > 0 || *g.DesiredCapacity > 0 {
		if err := resourceAwsAutoscalingGroupDrain(d, meta); err != nil {
			return err
		}
	}

	log.Printf("[DEBUG] AutoScaling Group destroy: %v", d.Id())
	deleteopts := autoscaling.DeleteAutoScalingGroupInput{
		AutoScalingGroupName: aws.String(d.Id()),
		ForceDelete:          aws.Bool(d.Get("force_delete").(bool)),
	}

	// We retry the delete operation to handle InUse/InProgress errors coming
	// from scaling operations. We should be able to sneak in a delete in between
	// scaling operations within 5m.
	err = resource.Retry(5*time.Minute, func() *resource.RetryError {
		if _, err := conn.DeleteAutoScalingGroup(&deleteopts); err != nil {
			if awserr, ok := err.(awserr.Error); ok {
				switch awserr.Code() {
				case "InvalidGroup.NotFound":
					// Already gone? Sure!
					return nil
				case "ResourceInUse", "ScalingActivityInProgress":
					// These are retryable
					return resource.RetryableError(awserr)
				}
			}
			// Didn't recognize the error, so shouldn't retry.
			return resource.NonRetryableError(err)
		}
		// Successful delete
		return nil
	})
	if err != nil {
		return err
	}

	// The delete call returning is not proof of disappearance; poll until
	// the group can no longer be described.
	return resource.Retry(5*time.Minute, func() *resource.RetryError {
		if g, _ = getAwsAutoscalingGroup(d.Id(), conn); g != nil {
			return resource.RetryableError(
				fmt.Errorf("Auto Scaling Group still exists"))
		}
		return nil
	})
}
|
|
|
|
|
2014-10-18 05:10:52 +02:00
|
|
|
func getAwsAutoscalingGroup(
|
2016-01-28 18:52:35 +01:00
|
|
|
asgName string,
|
|
|
|
conn *autoscaling.AutoScaling) (*autoscaling.Group, error) {
|
2014-07-10 01:00:11 +02:00
|
|
|
|
2015-04-15 22:30:35 +02:00
|
|
|
describeOpts := autoscaling.DescribeAutoScalingGroupsInput{
|
2016-01-28 18:52:35 +01:00
|
|
|
AutoScalingGroupNames: []*string{aws.String(asgName)},
|
2014-07-10 01:00:11 +02:00
|
|
|
}
|
|
|
|
|
2014-07-14 17:36:25 +02:00
|
|
|
log.Printf("[DEBUG] AutoScaling Group describe configuration: %#v", describeOpts)
|
2015-05-07 01:54:59 +02:00
|
|
|
describeGroups, err := conn.DescribeAutoScalingGroups(&describeOpts)
|
2014-07-10 01:00:11 +02:00
|
|
|
if err != nil {
|
2015-05-20 13:21:23 +02:00
|
|
|
autoscalingerr, ok := err.(awserr.Error)
|
|
|
|
if ok && autoscalingerr.Code() == "InvalidGroup.NotFound" {
|
2014-10-18 05:10:52 +02:00
|
|
|
return nil, nil
|
2014-10-10 23:34:40 +02:00
|
|
|
}
|
|
|
|
|
2014-10-18 05:10:52 +02:00
|
|
|
return nil, fmt.Errorf("Error retrieving AutoScaling groups: %s", err)
|
2014-07-10 01:00:11 +02:00
|
|
|
}
|
|
|
|
|
2014-12-10 02:11:50 +01:00
|
|
|
// Search for the autoscaling group
|
|
|
|
for idx, asc := range describeGroups.AutoScalingGroups {
|
2016-01-28 18:52:35 +01:00
|
|
|
if *asc.AutoScalingGroupName == asgName {
|
2015-04-15 22:30:35 +02:00
|
|
|
return describeGroups.AutoScalingGroups[idx], nil
|
2014-07-10 01:00:11 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-12-10 02:11:50 +01:00
|
|
|
return nil, nil
|
2014-10-18 05:10:52 +02:00
|
|
|
}
|
2014-07-10 01:00:11 +02:00
|
|
|
|
2014-10-18 05:10:52 +02:00
|
|
|
// resourceAwsAutoscalingGroupDrain empties the group before deletion by
// setting min/max/desired capacity to zero, then polling until no instances
// remain. It is a no-op when force_delete is set, since the API will then
// remove instances itself.
func resourceAwsAutoscalingGroupDrain(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).autoscalingconn

	if d.Get("force_delete").(bool) {
		log.Printf("[DEBUG] Skipping ASG drain, force_delete was set.")
		return nil
	}

	// First, set the capacity to zero so the group will drain
	log.Printf("[DEBUG] Reducing autoscaling group capacity to zero")
	opts := autoscaling.UpdateAutoScalingGroupInput{
		AutoScalingGroupName: aws.String(d.Id()),
		DesiredCapacity:      aws.Int64(0),
		MinSize:              aws.Int64(0),
		MaxSize:              aws.Int64(0),
	}
	if _, err := conn.UpdateAutoScalingGroup(&opts); err != nil {
		return fmt.Errorf("Error setting capacity to zero to drain: %s", err)
	}

	// Next, wait for the autoscale group to drain
	log.Printf("[DEBUG] Waiting for group to have zero instances")
	return resource.Retry(10*time.Minute, func() *resource.RetryError {
		g, err := getAwsAutoscalingGroup(d.Id(), conn)
		if err != nil {
			return resource.NonRetryableError(err)
		}
		if g == nil {
			// Group disappeared while draining: treat as fully drained.
			log.Printf("[INFO] Autoscaling Group %q not found", d.Id())
			d.SetId("")
			return nil
		}

		if len(g.Instances) == 0 {
			return nil
		}

		return resource.RetryableError(
			fmt.Errorf("group still has %d instances", len(g.Instances)))
	})
}
|
2015-05-14 19:45:21 +02:00
|
|
|
|
2016-02-29 21:58:41 +01:00
|
|
|
func enableASGMetricsCollection(d *schema.ResourceData, conn *autoscaling.AutoScaling) error {
|
|
|
|
props := &autoscaling.EnableMetricsCollectionInput{
|
|
|
|
AutoScalingGroupName: aws.String(d.Id()),
|
|
|
|
Granularity: aws.String(d.Get("metrics_granularity").(string)),
|
|
|
|
Metrics: expandStringList(d.Get("enabled_metrics").(*schema.Set).List()),
|
|
|
|
}
|
|
|
|
|
|
|
|
log.Printf("[INFO] Enabling metrics collection for the ASG: %s", d.Id())
|
|
|
|
_, metricsErr := conn.EnableMetricsCollection(props)
|
|
|
|
if metricsErr != nil {
|
|
|
|
return metricsErr
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func updateASGMetricsCollection(d *schema.ResourceData, conn *autoscaling.AutoScaling) error {
|
|
|
|
|
|
|
|
o, n := d.GetChange("enabled_metrics")
|
|
|
|
if o == nil {
|
|
|
|
o = new(schema.Set)
|
|
|
|
}
|
|
|
|
if n == nil {
|
|
|
|
n = new(schema.Set)
|
|
|
|
}
|
|
|
|
|
|
|
|
os := o.(*schema.Set)
|
|
|
|
ns := n.(*schema.Set)
|
|
|
|
|
|
|
|
disableMetrics := os.Difference(ns)
|
|
|
|
if disableMetrics.Len() != 0 {
|
|
|
|
props := &autoscaling.DisableMetricsCollectionInput{
|
|
|
|
AutoScalingGroupName: aws.String(d.Id()),
|
|
|
|
Metrics: expandStringList(disableMetrics.List()),
|
|
|
|
}
|
|
|
|
|
|
|
|
_, err := conn.DisableMetricsCollection(props)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("Failure to Disable metrics collection types for ASG %s: %s", d.Id(), err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
enabledMetrics := ns.Difference(os)
|
|
|
|
if enabledMetrics.Len() != 0 {
|
|
|
|
props := &autoscaling.EnableMetricsCollectionInput{
|
|
|
|
AutoScalingGroupName: aws.String(d.Id()),
|
|
|
|
Metrics: expandStringList(enabledMetrics.List()),
|
2016-07-20 20:36:45 +02:00
|
|
|
Granularity: aws.String(d.Get("metrics_granularity").(string)),
|
2016-02-29 21:58:41 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
_, err := conn.EnableMetricsCollection(props)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("Failure to Enable metrics collection types for ASG %s: %s", d.Id(), err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-05-14 19:45:21 +02:00
|
|
|
// Returns a mapping of the instance states of all the ELBs attached to the
|
|
|
|
// provided ASG.
|
|
|
|
//
|
|
|
|
// Nested like: lbName -> instanceId -> instanceState
|
2015-05-29 09:55:59 +02:00
|
|
|
func getLBInstanceStates(g *autoscaling.Group, meta interface{}) (map[string]map[string]string, error) {
|
2015-05-14 19:45:21 +02:00
|
|
|
lbInstanceStates := make(map[string]map[string]string)
|
|
|
|
elbconn := meta.(*AWSClient).elbconn
|
|
|
|
|
|
|
|
for _, lbName := range g.LoadBalancerNames {
|
|
|
|
lbInstanceStates[*lbName] = make(map[string]string)
|
|
|
|
opts := &elb.DescribeInstanceHealthInput{LoadBalancerName: lbName}
|
|
|
|
r, err := elbconn.DescribeInstanceHealth(opts)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
for _, is := range r.InstanceStates {
|
2015-08-17 20:27:16 +02:00
|
|
|
if is.InstanceId == nil || is.State == nil {
|
2015-05-14 19:45:21 +02:00
|
|
|
continue
|
|
|
|
}
|
2015-08-17 20:27:16 +02:00
|
|
|
lbInstanceStates[*lbName][*is.InstanceId] = *is.State
|
2015-05-14 19:45:21 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return lbInstanceStates, nil
|
|
|
|
}
|
2015-07-14 17:19:10 +02:00
|
|
|
|
|
|
|
func expandVpcZoneIdentifiers(list []interface{}) *string {
|
|
|
|
strs := make([]string, len(list))
|
|
|
|
for _, s := range list {
|
|
|
|
strs = append(strs, s.(string))
|
|
|
|
}
|
|
|
|
return aws.String(strings.Join(strs, ","))
|
|
|
|
}
|