package aws

import (
	"bytes"
	"encoding/json"
	"fmt"
	"reflect"
	"sort"
	"strconv"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/apigateway"
	"github.com/aws/aws-sdk-go/service/autoscaling"
	"github.com/aws/aws-sdk-go/service/cloudformation"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
	"github.com/aws/aws-sdk-go/service/cognitoidentity"
	"github.com/aws/aws-sdk-go/service/configservice"
	"github.com/aws/aws-sdk-go/service/directoryservice"
	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/aws/aws-sdk-go/service/ecs"
	"github.com/aws/aws-sdk-go/service/elasticache"
	"github.com/aws/aws-sdk-go/service/elasticbeanstalk"
	elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice"
	"github.com/aws/aws-sdk-go/service/elb"
	"github.com/aws/aws-sdk-go/service/kinesis"
	"github.com/aws/aws-sdk-go/service/lambda"
	"github.com/aws/aws-sdk-go/service/rds"
	"github.com/aws/aws-sdk-go/service/redshift"
	"github.com/aws/aws-sdk-go/service/route53"
	"github.com/hashicorp/terraform/helper/schema"
	"gopkg.in/yaml.v2"
)
|
|
|
|
|
|
|
|
// Takes the result of flatmap.Expand for an array of listeners and
|
|
|
|
// returns ELB API compatible objects
|
2015-04-16 22:28:18 +02:00
|
|
|
func expandListeners(configured []interface{}) ([]*elb.Listener, error) {
|
2015-04-16 22:18:01 +02:00
|
|
|
listeners := make([]*elb.Listener, 0, len(configured))
|
2014-07-03 00:55:28 +02:00
|
|
|
|
|
|
|
// Loop over our configured listeners and create
|
2016-09-12 08:14:24 +02:00
|
|
|
// an array of aws-sdk-go compatible objects
|
2014-10-10 08:58:48 +02:00
|
|
|
for _, lRaw := range configured {
|
|
|
|
data := lRaw.(map[string]interface{})
|
2014-07-25 00:50:18 +02:00
|
|
|
|
2015-04-16 22:18:01 +02:00
|
|
|
ip := int64(data["instance_port"].(int))
|
|
|
|
lp := int64(data["lb_port"].(int))
|
|
|
|
l := &elb.Listener{
|
|
|
|
InstancePort: &ip,
|
2015-03-02 16:44:06 +01:00
|
|
|
InstanceProtocol: aws.String(data["instance_protocol"].(string)),
|
2015-04-16 22:18:01 +02:00
|
|
|
LoadBalancerPort: &lp,
|
2015-03-02 16:44:06 +01:00
|
|
|
Protocol: aws.String(data["lb_protocol"].(string)),
|
2014-07-03 00:55:28 +02:00
|
|
|
}
|
|
|
|
|
2014-10-10 08:58:48 +02:00
|
|
|
if v, ok := data["ssl_certificate_id"]; ok {
|
2015-08-17 20:27:16 +02:00
|
|
|
l.SSLCertificateId = aws.String(v.(string))
|
2014-08-11 01:09:05 +02:00
|
|
|
}
|
|
|
|
|
2015-11-13 20:53:52 +01:00
|
|
|
var valid bool
|
|
|
|
if l.SSLCertificateId != nil && *l.SSLCertificateId != "" {
|
|
|
|
// validate the protocol is correct
|
|
|
|
for _, p := range []string{"https", "ssl"} {
|
2016-02-29 19:04:47 +01:00
|
|
|
if (strings.ToLower(*l.InstanceProtocol) == p) || (strings.ToLower(*l.Protocol) == p) {
|
2015-11-13 20:53:52 +01:00
|
|
|
valid = true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
valid = true
|
|
|
|
}
|
2015-11-12 18:10:52 +01:00
|
|
|
|
2015-11-13 20:53:52 +01:00
|
|
|
if valid {
|
|
|
|
listeners = append(listeners, l)
|
|
|
|
} else {
|
|
|
|
return nil, fmt.Errorf("[ERR] ELB Listener: ssl_certificate_id may be set only when protocol is 'https' or 'ssl'")
|
|
|
|
}
|
|
|
|
}
|
2014-07-03 00:55:28 +02:00
|
|
|
|
2014-07-25 00:50:18 +02:00
|
|
|
return listeners, nil
|
2014-07-03 00:55:28 +02:00
|
|
|
}
|
2014-07-03 01:57:57 +02:00
|
|
|
|
2015-05-04 00:12:50 +02:00
|
|
|
// Takes the result of flatmap.Expand for an array of volumes and
// returns ECS Volume compatible objects
|
|
|
|
func expandEcsVolumes(configured []interface{}) ([]*ecs.Volume, error) {
|
|
|
|
volumes := make([]*ecs.Volume, 0, len(configured))
|
|
|
|
|
|
|
|
// Loop over our configured volumes and create
|
|
|
|
// an array of aws-sdk-go compatible objects
|
|
|
|
for _, lRaw := range configured {
|
|
|
|
data := lRaw.(map[string]interface{})
|
|
|
|
|
|
|
|
l := &ecs.Volume{
|
|
|
|
Name: aws.String(data["name"].(string)),
|
2015-11-08 20:31:18 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
hostPath := data["host_path"].(string)
|
|
|
|
if hostPath != "" {
|
|
|
|
l.Host = &ecs.HostVolumeProperties{
|
|
|
|
SourcePath: aws.String(hostPath),
|
|
|
|
}
|
2015-05-04 00:12:50 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
volumes = append(volumes, l)
|
|
|
|
}
|
|
|
|
|
|
|
|
return volumes, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Takes JSON in a string. Decodes JSON into
|
|
|
|
// an array of ecs.ContainerDefinition compatible objects
|
|
|
|
func expandEcsContainerDefinitions(rawDefinitions string) ([]*ecs.ContainerDefinition, error) {
|
|
|
|
var definitions []*ecs.ContainerDefinition
|
|
|
|
|
|
|
|
err := json.Unmarshal([]byte(rawDefinitions), &definitions)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("Error decoding JSON: %s", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return definitions, nil
|
|
|
|
}
|
|
|
|
|
2015-05-05 00:10:54 +02:00
|
|
|
// Takes the result of flatmap.Expand for an array of load balancers and
// returns ecs.LoadBalancer compatible objects
|
|
|
|
func expandEcsLoadBalancers(configured []interface{}) []*ecs.LoadBalancer {
|
|
|
|
loadBalancers := make([]*ecs.LoadBalancer, 0, len(configured))
|
|
|
|
|
|
|
|
// Loop over our configured load balancers and create
|
|
|
|
// an array of aws-sdk-go compatible objects
|
|
|
|
for _, lRaw := range configured {
|
|
|
|
data := lRaw.(map[string]interface{})
|
|
|
|
|
|
|
|
l := &ecs.LoadBalancer{
|
2016-08-23 18:19:43 +02:00
|
|
|
ContainerName: aws.String(data["container_name"].(string)),
|
|
|
|
ContainerPort: aws.Int64(int64(data["container_port"].(int))),
|
|
|
|
}
|
|
|
|
|
|
|
|
if v, ok := data["elb_name"]; ok && v.(string) != "" {
|
|
|
|
l.LoadBalancerName = aws.String(v.(string))
|
|
|
|
}
|
|
|
|
if v, ok := data["target_group_arn"]; ok && v.(string) != "" {
|
|
|
|
l.TargetGroupArn = aws.String(v.(string))
|
2015-05-05 00:10:54 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
loadBalancers = append(loadBalancers, l)
|
|
|
|
}
|
|
|
|
|
|
|
|
return loadBalancers
|
|
|
|
}
|
|
|
|
|
2015-05-05 05:43:31 +02:00
|
|
|
// Takes the result of flatmap.Expand for an array of ingress/egress security
|
|
|
|
// group rules and returns EC2 API compatible objects. This function will error
|
|
|
|
// if it finds invalid permissions input, namely a protocol of "-1" with either
|
|
|
|
// to_port or from_port set to a non-zero value.
|
2015-04-16 22:28:18 +02:00
|
|
|
func expandIPPerms(
|
2015-08-17 20:27:16 +02:00
|
|
|
group *ec2.SecurityGroup, configured []interface{}) ([]*ec2.IpPermission, error) {
|
2016-03-04 09:28:37 +01:00
|
|
|
vpc := group.VpcId != nil && *group.VpcId != ""
|
2015-03-18 14:47:59 +01:00
|
|
|
|
2015-08-17 20:27:16 +02:00
|
|
|
perms := make([]*ec2.IpPermission, len(configured))
|
2015-03-09 16:02:27 +01:00
|
|
|
for i, mRaw := range configured {
|
2015-08-17 20:27:16 +02:00
|
|
|
var perm ec2.IpPermission
|
2015-03-09 16:02:27 +01:00
|
|
|
m := mRaw.(map[string]interface{})
|
|
|
|
|
2015-07-28 22:29:46 +02:00
|
|
|
perm.FromPort = aws.Int64(int64(m["from_port"].(int)))
|
|
|
|
perm.ToPort = aws.Int64(int64(m["to_port"].(int)))
|
2015-08-17 20:27:16 +02:00
|
|
|
perm.IpProtocol = aws.String(m["protocol"].(string))
|
2015-03-09 16:02:27 +01:00
|
|
|
|
2015-05-05 05:43:31 +02:00
|
|
|
// When protocol is "-1", AWS won't store any ports for the
|
|
|
|
// rule, but also won't error if the user specifies ports other
|
|
|
|
// than '0'. Force the user to make a deliberate '0' port
|
|
|
|
// choice when specifying a "-1" protocol, and tell them about
|
|
|
|
// AWS's behavior in the error message.
|
2015-08-17 20:27:16 +02:00
|
|
|
if *perm.IpProtocol == "-1" && (*perm.FromPort != 0 || *perm.ToPort != 0) {
|
2015-05-05 05:43:31 +02:00
|
|
|
return nil, fmt.Errorf(
|
2016-11-29 16:49:34 +01:00
|
|
|
"from_port (%d) and to_port (%d) must both be 0 to use the 'ALL' \"-1\" protocol!",
|
2015-05-05 05:43:31 +02:00
|
|
|
*perm.FromPort, *perm.ToPort)
|
|
|
|
}
|
|
|
|
|
2015-03-09 16:02:27 +01:00
|
|
|
var groups []string
|
|
|
|
if raw, ok := m["security_groups"]; ok {
|
|
|
|
list := raw.(*schema.Set).List()
|
|
|
|
for _, v := range list {
|
|
|
|
groups = append(groups, v.(string))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if v, ok := m["self"]; ok && v.(bool) {
|
2015-03-18 14:47:59 +01:00
|
|
|
if vpc {
|
2015-08-17 20:27:16 +02:00
|
|
|
groups = append(groups, *group.GroupId)
|
2015-03-18 14:47:59 +01:00
|
|
|
} else {
|
|
|
|
groups = append(groups, *group.GroupName)
|
|
|
|
}
|
2015-03-09 16:02:27 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if len(groups) > 0 {
|
2015-08-17 20:27:16 +02:00
|
|
|
perm.UserIdGroupPairs = make([]*ec2.UserIdGroupPair, len(groups))
|
2015-03-09 16:02:27 +01:00
|
|
|
for i, name := range groups {
|
|
|
|
ownerId, id := "", name
|
|
|
|
if items := strings.Split(id, "/"); len(items) > 1 {
|
|
|
|
ownerId, id = items[0], items[1]
|
|
|
|
}
|
|
|
|
|
2015-08-17 20:27:16 +02:00
|
|
|
perm.UserIdGroupPairs[i] = &ec2.UserIdGroupPair{
|
|
|
|
GroupId: aws.String(id),
|
2015-03-09 16:02:27 +01:00
|
|
|
}
|
2015-07-07 20:06:36 +02:00
|
|
|
|
|
|
|
if ownerId != "" {
|
2015-08-17 20:27:16 +02:00
|
|
|
perm.UserIdGroupPairs[i].UserId = aws.String(ownerId)
|
2015-07-07 20:06:36 +02:00
|
|
|
}
|
|
|
|
|
2015-03-18 14:47:59 +01:00
|
|
|
if !vpc {
|
2015-08-17 20:27:16 +02:00
|
|
|
perm.UserIdGroupPairs[i].GroupId = nil
|
|
|
|
perm.UserIdGroupPairs[i].GroupName = aws.String(id)
|
2015-03-18 14:47:59 +01:00
|
|
|
}
|
2015-03-09 16:02:27 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if raw, ok := m["cidr_blocks"]; ok {
|
|
|
|
list := raw.([]interface{})
|
2015-04-21 17:57:50 +02:00
|
|
|
for _, v := range list {
|
2015-08-17 20:27:16 +02:00
|
|
|
perm.IpRanges = append(perm.IpRanges, &ec2.IpRange{CidrIp: aws.String(v.(string))})
|
2015-03-09 16:02:27 +01:00
|
|
|
}
|
|
|
|
}
|
2017-03-14 12:02:50 +01:00
|
|
|
if raw, ok := m["ipv6_cidr_blocks"]; ok {
|
|
|
|
list := raw.([]interface{})
|
|
|
|
for _, v := range list {
|
|
|
|
perm.Ipv6Ranges = append(perm.Ipv6Ranges, &ec2.Ipv6Range{CidrIpv6: aws.String(v.(string))})
|
|
|
|
}
|
|
|
|
}
|
2015-03-09 16:02:27 +01:00
|
|
|
|
2016-06-06 12:07:19 +02:00
|
|
|
if raw, ok := m["prefix_list_ids"]; ok {
|
|
|
|
list := raw.([]interface{})
|
|
|
|
for _, v := range list {
|
|
|
|
perm.PrefixListIds = append(perm.PrefixListIds, &ec2.PrefixListId{PrefixListId: aws.String(v.(string))})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-04-16 22:18:01 +02:00
|
|
|
perms[i] = &perm
|
2015-03-09 16:02:27 +01:00
|
|
|
}
|
|
|
|
|
2015-05-05 05:43:31 +02:00
|
|
|
return perms, nil
|
2015-03-09 16:02:27 +01:00
|
|
|
}
|
|
|
|
|
2014-10-22 23:22:30 +02:00
|
|
|
// Takes the result of flatmap.Expand for an array of parameters and
|
|
|
|
// returns Parameter API compatible objects
|
2015-04-16 22:28:18 +02:00
|
|
|
func expandParameters(configured []interface{}) ([]*rds.Parameter, error) {
|
2015-07-14 20:29:03 +02:00
|
|
|
var parameters []*rds.Parameter
|
2014-10-22 23:22:30 +02:00
|
|
|
|
|
|
|
// Loop over our configured parameters and create
|
2016-09-12 08:14:24 +02:00
|
|
|
// an array of aws-sdk-go compatible objects
|
2014-10-22 23:22:30 +02:00
|
|
|
for _, pRaw := range configured {
|
|
|
|
data := pRaw.(map[string]interface{})
|
|
|
|
|
2015-07-14 20:29:03 +02:00
|
|
|
if data["name"].(string) == "" {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2015-04-16 22:18:01 +02:00
|
|
|
p := &rds.Parameter{
|
2015-02-26 16:33:33 +01:00
|
|
|
ApplyMethod: aws.String(data["apply_method"].(string)),
|
|
|
|
ParameterName: aws.String(data["name"].(string)),
|
|
|
|
ParameterValue: aws.String(data["value"].(string)),
|
2014-10-22 23:22:30 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
parameters = append(parameters, p)
|
|
|
|
}
|
|
|
|
|
|
|
|
return parameters, nil
|
|
|
|
}
|
|
|
|
|
2015-11-12 00:37:56 +01:00
|
|
|
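// Takes the result of flatmap.Expand for an array of Redshift parameters and
// returns Parameter API compatible objects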
func expandRedshiftParameters(configured []interface{}) ([]*redshift.Parameter, error) {
|
|
|
|
var parameters []*redshift.Parameter
|
|
|
|
|
|
|
|
// Loop over our configured parameters and create
|
2016-09-12 08:14:24 +02:00
|
|
|
// an array of aws-sdk-go compatible objects
|
2015-11-12 00:37:56 +01:00
|
|
|
for _, pRaw := range configured {
|
|
|
|
data := pRaw.(map[string]interface{})
|
|
|
|
|
|
|
|
if data["name"].(string) == "" {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
p := &redshift.Parameter{
|
|
|
|
ParameterName: aws.String(data["name"].(string)),
|
|
|
|
ParameterValue: aws.String(data["value"].(string)),
|
|
|
|
}
|
|
|
|
|
|
|
|
parameters = append(parameters, p)
|
|
|
|
}
|
|
|
|
|
|
|
|
return parameters, nil
|
|
|
|
}
|
|
|
|
|
2015-12-21 10:15:52 +01:00
|
|
|
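// Takes the result of flatmap.Expand for an array of DB option configurations and
// returns rds.OptionConfiguration compatible objects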
func expandOptionConfiguration(configured []interface{}) ([]*rds.OptionConfiguration, error) {
|
|
|
|
var option []*rds.OptionConfiguration
|
|
|
|
|
|
|
|
for _, pRaw := range configured {
|
|
|
|
data := pRaw.(map[string]interface{})
|
|
|
|
|
|
|
|
o := &rds.OptionConfiguration{
|
|
|
|
OptionName: aws.String(data["option_name"].(string)),
|
|
|
|
}
|
|
|
|
|
|
|
|
if raw, ok := data["port"]; ok {
|
|
|
|
port := raw.(int)
|
|
|
|
if port != 0 {
|
|
|
|
o.Port = aws.Int64(int64(port))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if raw, ok := data["db_security_group_memberships"]; ok {
|
|
|
|
memberships := expandStringList(raw.(*schema.Set).List())
|
|
|
|
if len(memberships) > 0 {
|
|
|
|
o.DBSecurityGroupMemberships = memberships
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if raw, ok := data["vpc_security_group_memberships"]; ok {
|
|
|
|
memberships := expandStringList(raw.(*schema.Set).List())
|
|
|
|
if len(memberships) > 0 {
|
|
|
|
o.VpcSecurityGroupMemberships = memberships
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-05-09 20:19:43 +02:00
|
|
|
if raw, ok := data["option_settings"]; ok {
|
|
|
|
o.OptionSettings = expandOptionSetting(raw.(*schema.Set).List())
|
|
|
|
}
|
|
|
|
|
2015-12-21 10:15:52 +01:00
|
|
|
option = append(option, o)
|
|
|
|
}
|
|
|
|
|
|
|
|
return option, nil
|
|
|
|
}
|
|
|
|
|
2016-05-09 20:19:43 +02:00
|
|
|
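// Takes the result of flatmap.Expand for an array of option settings and
// returns rds.OptionSetting compatible objects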
func expandOptionSetting(list []interface{}) []*rds.OptionSetting {
|
|
|
|
options := make([]*rds.OptionSetting, 0, len(list))
|
|
|
|
|
|
|
|
for _, oRaw := range list {
|
|
|
|
data := oRaw.(map[string]interface{})
|
|
|
|
|
|
|
|
o := &rds.OptionSetting{
|
|
|
|
Name: aws.String(data["name"].(string)),
|
|
|
|
Value: aws.String(data["value"].(string)),
|
|
|
|
}
|
|
|
|
|
|
|
|
options = append(options, o)
|
|
|
|
}
|
|
|
|
|
|
|
|
return options
|
|
|
|
}
|
|
|
|
|
2015-06-08 21:05:00 +02:00
|
|
|
// Takes the result of flatmap.Expand for an array of parameters and
|
|
|
|
// returns Parameter API compatible objects
|
|
|
|
func expandElastiCacheParameters(configured []interface{}) ([]*elasticache.ParameterNameValue, error) {
|
|
|
|
parameters := make([]*elasticache.ParameterNameValue, 0, len(configured))
|
|
|
|
|
|
|
|
// Loop over our configured parameters and create
|
2016-09-12 08:14:24 +02:00
|
|
|
// an array of aws-sdk-go compatible objects
|
2015-06-08 21:05:00 +02:00
|
|
|
for _, pRaw := range configured {
|
|
|
|
data := pRaw.(map[string]interface{})
|
|
|
|
|
|
|
|
p := &elasticache.ParameterNameValue{
|
|
|
|
ParameterName: aws.String(data["name"].(string)),
|
|
|
|
ParameterValue: aws.String(data["value"].(string)),
|
|
|
|
}
|
|
|
|
|
|
|
|
parameters = append(parameters, p)
|
|
|
|
}
|
|
|
|
|
|
|
|
return parameters, nil
|
|
|
|
}
|
|
|
|
|
2015-10-30 23:45:43 +01:00
|
|
|
// Flattens an access log into something that flatmap.Flatten() can handle
|
2015-11-11 22:25:24 +01:00
|
|
|
func flattenAccessLog(l *elb.AccessLog) []map[string]interface{} {
|
2015-11-03 23:30:18 +01:00
|
|
|
result := make([]map[string]interface{}, 0, 1)
|
|
|
|
|
2017-01-10 00:10:58 +01:00
|
|
|
if l == nil {
|
|
|
|
return nil
|
|
|
|
}
|
2015-11-04 18:50:34 +01:00
|
|
|
|
2017-01-10 00:10:58 +01:00
|
|
|
r := make(map[string]interface{})
|
|
|
|
if l.S3BucketName != nil {
|
|
|
|
r["bucket"] = *l.S3BucketName
|
|
|
|
}
|
2015-11-04 18:50:34 +01:00
|
|
|
|
2017-01-10 00:10:58 +01:00
|
|
|
if l.S3BucketPrefix != nil {
|
|
|
|
r["bucket_prefix"] = *l.S3BucketPrefix
|
|
|
|
}
|
2015-11-04 18:50:34 +01:00
|
|
|
|
2017-01-10 00:10:58 +01:00
|
|
|
if l.EmitInterval != nil {
|
|
|
|
r["interval"] = *l.EmitInterval
|
|
|
|
}
|
2016-08-24 11:07:47 +02:00
|
|
|
|
2017-01-10 00:10:58 +01:00
|
|
|
if l.Enabled != nil {
|
|
|
|
r["enabled"] = *l.Enabled
|
2015-11-03 23:30:18 +01:00
|
|
|
}
|
|
|
|
|
2017-01-10 00:10:58 +01:00
|
|
|
result = append(result, r)
|
|
|
|
|
2015-11-03 23:30:18 +01:00
|
|
|
return result
|
2015-10-30 23:45:43 +01:00
|
|
|
}
|
|
|
|
|
2015-12-12 00:52:02 +01:00
|
|
|
// Takes the result of flatmap.Expand for an array of step adjustments and
|
|
|
|
// returns a []*autoscaling.StepAdjustment.
|
|
|
|
func expandStepAdjustments(configured []interface{}) ([]*autoscaling.StepAdjustment, error) {
|
|
|
|
var adjustments []*autoscaling.StepAdjustment
|
|
|
|
|
|
|
|
// Loop over our configured step adjustments and create an array
|
|
|
|
// of aws-sdk-go compatible objects. We're forced to convert strings
|
|
|
|
// to floats here because there's no way to detect whether or not
|
|
|
|
// an uninitialized, optional schema element is "0.0" deliberately.
|
|
|
|
// With strings, we can test for "", which is definitely an empty
|
|
|
|
// struct value.
|
|
|
|
for _, raw := range configured {
|
|
|
|
data := raw.(map[string]interface{})
|
|
|
|
a := &autoscaling.StepAdjustment{
|
|
|
|
ScalingAdjustment: aws.Int64(int64(data["scaling_adjustment"].(int))),
|
|
|
|
}
|
|
|
|
if data["metric_interval_lower_bound"] != "" {
|
|
|
|
bound := data["metric_interval_lower_bound"]
|
|
|
|
switch bound := bound.(type) {
|
|
|
|
case string:
|
|
|
|
f, err := strconv.ParseFloat(bound, 64)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf(
|
|
|
|
"metric_interval_lower_bound must be a float value represented as a string")
|
|
|
|
}
|
|
|
|
a.MetricIntervalLowerBound = aws.Float64(f)
|
|
|
|
default:
|
|
|
|
return nil, fmt.Errorf(
|
|
|
|
"metric_interval_lower_bound isn't a string. This is a bug. Please file an issue.")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if data["metric_interval_upper_bound"] != "" {
|
|
|
|
bound := data["metric_interval_upper_bound"]
|
|
|
|
switch bound := bound.(type) {
|
|
|
|
case string:
|
|
|
|
f, err := strconv.ParseFloat(bound, 64)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf(
|
|
|
|
"metric_interval_upper_bound must be a float value represented as a string")
|
|
|
|
}
|
|
|
|
a.MetricIntervalUpperBound = aws.Float64(f)
|
|
|
|
default:
|
|
|
|
return nil, fmt.Errorf(
|
|
|
|
"metric_interval_upper_bound isn't a string. This is a bug. Please file an issue.")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
adjustments = append(adjustments, a)
|
|
|
|
}
|
|
|
|
|
|
|
|
return adjustments, nil
|
|
|
|
}
|
|
|
|
|
2014-07-30 16:15:22 +02:00
|
|
|
// Flattens a health check into something that flatmap.Flatten()
|
|
|
|
// can handle
|
2015-04-16 22:28:18 +02:00
|
|
|
func flattenHealthCheck(check *elb.HealthCheck) []map[string]interface{} {
|
2014-07-30 16:15:22 +02:00
|
|
|
result := make([]map[string]interface{}, 0, 1)
|
|
|
|
|
|
|
|
chk := make(map[string]interface{})
|
2015-03-02 16:44:06 +01:00
|
|
|
chk["unhealthy_threshold"] = *check.UnhealthyThreshold
|
|
|
|
chk["healthy_threshold"] = *check.HealthyThreshold
|
|
|
|
chk["target"] = *check.Target
|
|
|
|
chk["timeout"] = *check.Timeout
|
|
|
|
chk["interval"] = *check.Interval
|
2014-07-30 16:15:22 +02:00
|
|
|
|
|
|
|
result = append(result, chk)
|
|
|
|
|
|
|
|
return result
|
|
|
|
}
|
|
|
|
|
2016-03-09 22:00:30 +01:00
|
|
|
// Flattens an array of UserSecurityGroups into a []*ec2.GroupIdentifier
|
2016-03-04 09:28:37 +01:00
|
|
|
func flattenSecurityGroups(list []*ec2.UserIdGroupPair, ownerId *string) []*ec2.GroupIdentifier {
|
|
|
|
result := make([]*ec2.GroupIdentifier, 0, len(list))
|
2015-03-09 16:02:27 +01:00
|
|
|
for _, g := range list {
|
2016-03-04 09:28:37 +01:00
|
|
|
var userId *string
|
|
|
|
if g.UserId != nil && *g.UserId != "" && (ownerId == nil || *ownerId != *g.UserId) {
|
|
|
|
userId = g.UserId
|
|
|
|
}
|
2016-03-09 22:50:01 +01:00
|
|
|
// userId stays nil here for security groups owned by the same account (same-VPC groups)
|
2016-03-04 09:28:37 +01:00
|
|
|
|
|
|
|
vpc := g.GroupName == nil || *g.GroupName == ""
|
|
|
|
var id *string
|
|
|
|
if vpc {
|
|
|
|
id = g.GroupId
|
|
|
|
} else {
|
|
|
|
id = g.GroupName
|
|
|
|
}
|
|
|
|
|
2016-03-09 22:50:01 +01:00
|
|
|
// id is groupid for vpcs
|
|
|
|
// id is groupname for non vpc (classic)
|
|
|
|
|
2016-03-04 09:28:37 +01:00
|
|
|
if userId != nil {
|
|
|
|
id = aws.String(*userId + "/" + *id)
|
|
|
|
}
|
|
|
|
|
|
|
|
if vpc {
|
|
|
|
result = append(result, &ec2.GroupIdentifier{
|
|
|
|
GroupId: id,
|
|
|
|
})
|
|
|
|
} else {
|
|
|
|
result = append(result, &ec2.GroupIdentifier{
|
|
|
|
GroupId: g.GroupId,
|
|
|
|
GroupName: id,
|
|
|
|
})
|
|
|
|
}
|
2015-03-09 16:02:27 +01:00
|
|
|
}
|
|
|
|
return result
|
|
|
|
}
|
|
|
|
|
2014-07-16 23:02:47 +02:00
|
|
|
// Flattens an array of Instances into a []string
|
2015-04-16 22:28:18 +02:00
|
|
|
func flattenInstances(list []*elb.Instance) []string {
|
2014-07-16 23:02:47 +02:00
|
|
|
result := make([]string, 0, len(list))
|
|
|
|
for _, i := range list {
|
2015-08-17 20:27:16 +02:00
|
|
|
result = append(result, *i.InstanceId)
|
2015-03-02 16:44:06 +01:00
|
|
|
}
|
|
|
|
return result
|
|
|
|
}
|
|
|
|
|
|
|
|
// Expands an array of String Instance IDs into a []Instances
|
2015-04-16 22:28:18 +02:00
|
|
|
func expandInstanceString(list []interface{}) []*elb.Instance {
|
2015-04-16 22:18:01 +02:00
|
|
|
result := make([]*elb.Instance, 0, len(list))
|
2015-03-02 16:44:06 +01:00
|
|
|
for _, i := range list {
|
2015-08-17 20:27:16 +02:00
|
|
|
result = append(result, &elb.Instance{InstanceId: aws.String(i.(string))})
|
2014-07-16 23:02:47 +02:00
|
|
|
}
|
|
|
|
return result
|
|
|
|
}
|
|
|
|
|
2015-04-26 18:16:52 +02:00
|
|
|
// Flattens an array of Backend Descriptions into a map of instance_port to policy names.
|
|
|
|
func flattenBackendPolicies(backends []*elb.BackendServerDescription) map[int64][]string {
|
|
|
|
policies := make(map[int64][]string)
|
|
|
|
for _, i := range backends {
|
|
|
|
for _, p := range i.PolicyNames {
|
|
|
|
policies[*i.InstancePort] = append(policies[*i.InstancePort], *p)
|
|
|
|
}
|
|
|
|
sort.Strings(policies[*i.InstancePort])
|
|
|
|
}
|
|
|
|
return policies
|
|
|
|
}
|
|
|
|
|
2014-10-11 01:35:52 +02:00
|
|
|
// Flattens an array of Listeners into a []map[string]interface{}
|
2015-04-16 22:28:18 +02:00
|
|
|
func flattenListeners(list []*elb.ListenerDescription) []map[string]interface{} {
|
2014-10-11 01:35:52 +02:00
|
|
|
result := make([]map[string]interface{}, 0, len(list))
|
|
|
|
for _, i := range list {
|
2015-03-02 16:44:06 +01:00
|
|
|
l := map[string]interface{}{
|
|
|
|
"instance_port": *i.Listener.InstancePort,
|
|
|
|
"instance_protocol": strings.ToLower(*i.Listener.InstanceProtocol),
|
|
|
|
"lb_port": *i.Listener.LoadBalancerPort,
|
|
|
|
"lb_protocol": strings.ToLower(*i.Listener.Protocol),
|
|
|
|
}
|
|
|
|
// SSLCertificateID is optional, and may be nil
|
2015-08-17 20:27:16 +02:00
|
|
|
if i.Listener.SSLCertificateId != nil {
|
|
|
|
l["ssl_certificate_id"] = *i.Listener.SSLCertificateId
|
2015-03-02 16:44:06 +01:00
|
|
|
}
|
|
|
|
result = append(result, l)
|
2014-10-11 01:35:52 +02:00
|
|
|
}
|
|
|
|
return result
|
|
|
|
}
|
|
|
|
|
2015-05-04 00:12:50 +02:00
|
|
|
// Flattens an array of Volumes into a []map[string]interface{}
|
|
|
|
func flattenEcsVolumes(list []*ecs.Volume) []map[string]interface{} {
|
|
|
|
result := make([]map[string]interface{}, 0, len(list))
|
|
|
|
for _, volume := range list {
|
|
|
|
l := map[string]interface{}{
|
2015-11-08 20:31:18 +01:00
|
|
|
"name": *volume.Name,
|
2015-05-04 00:12:50 +02:00
|
|
|
}
|
2015-11-08 20:31:18 +01:00
|
|
|
|
|
|
|
// Guard against a nil Host block before dereferencing SourcePath
if volume.Host != nil && volume.Host.SourcePath != nil {
|
|
|
|
l["host_path"] = *volume.Host.SourcePath
|
|
|
|
}
|
|
|
|
|
2015-05-04 00:12:50 +02:00
|
|
|
result = append(result, l)
|
|
|
|
}
|
|
|
|
return result
|
|
|
|
}
|
|
|
|
|
|
|
|
// Flattens an array of ECS LoadBalancers into a []map[string]interface{}
|
|
|
|
func flattenEcsLoadBalancers(list []*ecs.LoadBalancer) []map[string]interface{} {
|
|
|
|
result := make([]map[string]interface{}, 0, len(list))
|
|
|
|
for _, loadBalancer := range list {
|
|
|
|
l := map[string]interface{}{
|
|
|
|
"container_name": *loadBalancer.ContainerName,
|
|
|
|
"container_port": *loadBalancer.ContainerPort,
|
|
|
|
}
|
2016-08-23 18:19:43 +02:00
|
|
|
|
|
|
|
if loadBalancer.LoadBalancerName != nil {
|
|
|
|
l["elb_name"] = *loadBalancer.LoadBalancerName
|
|
|
|
}
|
|
|
|
|
|
|
|
if loadBalancer.TargetGroupArn != nil {
|
|
|
|
l["target_group_arn"] = *loadBalancer.TargetGroupArn
|
|
|
|
}
|
|
|
|
|
2015-05-04 00:12:50 +02:00
|
|
|
result = append(result, l)
|
|
|
|
}
|
|
|
|
return result
|
|
|
|
}
|
|
|
|
|
|
|
|
// Encodes an array of ecs.ContainerDefinitions into a JSON string
|
|
|
|
func flattenEcsContainerDefinitions(definitions []*ecs.ContainerDefinition) (string, error) {
|
|
|
|
byteArray, err := json.Marshal(definitions)
|
|
|
|
if err != nil {
|
|
|
|
return "", fmt.Errorf("Error encoding to JSON: %s", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// json.Marshal output normally contains no NUL byte; bytes.Index then returns -1,
// which would make the slice below panic, so fall back to the full length.
n := bytes.Index(byteArray, []byte{0})
if n < 0 {
n = len(byteArray)
}
return string(byteArray[:n]), nil
|
|
|
|
}
|
|
|
|
|
2016-05-09 20:19:43 +02:00
|
|
|
// Flattens an array of Options into a []map[string]interface{}
|
2015-12-21 10:15:52 +01:00
|
|
|
func flattenOptions(list []*rds.Option) []map[string]interface{} {
|
|
|
|
result := make([]map[string]interface{}, 0, len(list))
|
|
|
|
for _, i := range list {
|
|
|
|
if i.OptionName != nil {
|
|
|
|
r := make(map[string]interface{})
|
|
|
|
r["option_name"] = strings.ToLower(*i.OptionName)
|
|
|
|
// Default empty string, guard against nil parameter values
|
|
|
|
r["port"] = ""
|
|
|
|
if i.Port != nil {
|
|
|
|
r["port"] = int(*i.Port)
|
|
|
|
}
|
|
|
|
if i.VpcSecurityGroupMemberships != nil {
|
|
|
|
vpcs := make([]string, 0, len(i.VpcSecurityGroupMemberships))
|
|
|
|
for _, vpc := range i.VpcSecurityGroupMemberships {
|
|
|
|
id := vpc.VpcSecurityGroupId
|
|
|
|
vpcs = append(vpcs, *id)
|
|
|
|
}
|
|
|
|
|
|
|
|
r["vpc_security_group_memberships"] = vpcs
|
|
|
|
}
|
|
|
|
if i.DBSecurityGroupMemberships != nil {
|
|
|
|
dbs := make([]string, 0, len(i.DBSecurityGroupMemberships))
|
|
|
|
for _, db := range i.DBSecurityGroupMemberships {
|
|
|
|
id := db.DBSecurityGroupName
|
|
|
|
dbs = append(dbs, *id)
|
|
|
|
}
|
|
|
|
|
|
|
|
r["db_security_group_memberships"] = dbs
|
|
|
|
}
|
2016-05-09 20:19:43 +02:00
|
|
|
if i.OptionSettings != nil {
|
|
|
|
settings := make([]map[string]interface{}, 0, len(i.OptionSettings))
|
|
|
|
for _, j := range i.OptionSettings {
|
2016-09-26 14:52:33 +02:00
|
|
|
setting := map[string]interface{}{
|
|
|
|
"name": *j.Name,
|
|
|
|
}
|
|
|
|
if j.Value != nil {
|
|
|
|
setting["value"] = *j.Value
|
|
|
|
}
|
|
|
|
|
|
|
|
settings = append(settings, setting)
|
2016-05-09 20:19:43 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
r["option_settings"] = settings
|
|
|
|
}
|
2015-12-21 10:15:52 +01:00
|
|
|
result = append(result, r)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return result
|
|
|
|
}
|
|
|
|
|
2014-10-23 02:03:57 +02:00
|
|
|
// Flattens an array of Parameters into a []map[string]interface{}
|
2015-04-16 22:28:18 +02:00
|
|
|
func flattenParameters(list []*rds.Parameter) []map[string]interface{} {
|
2014-10-22 23:22:30 +02:00
|
|
|
result := make([]map[string]interface{}, 0, len(list))
|
|
|
|
for _, i := range list {
|
2015-12-14 21:41:54 +01:00
|
|
|
if i.ParameterName != nil {
|
|
|
|
r := make(map[string]interface{})
|
|
|
|
r["name"] = strings.ToLower(*i.ParameterName)
|
|
|
|
// Default empty string, guard against nil parameter values
|
|
|
|
r["value"] = ""
|
|
|
|
if i.ParameterValue != nil {
|
|
|
|
r["value"] = strings.ToLower(*i.ParameterValue)
|
|
|
|
}
|
2016-09-01 14:08:30 +02:00
|
|
|
if i.ApplyMethod != nil {
|
|
|
|
r["apply_method"] = strings.ToLower(*i.ApplyMethod)
|
|
|
|
}
|
|
|
|
|
2015-12-14 21:41:54 +01:00
|
|
|
result = append(result, r)
|
|
|
|
}
|
2014-10-22 23:22:30 +02:00
|
|
|
}
|
|
|
|
return result
|
|
|
|
}
|
|
|
|
|
2015-11-12 00:37:56 +01:00
|
|
|
// Flattens an array of Redshift Parameters into a []map[string]interface{}
|
|
|
|
func flattenRedshiftParameters(list []*redshift.Parameter) []map[string]interface{} {
|
|
|
|
result := make([]map[string]interface{}, 0, len(list))
|
|
|
|
for _, i := range list {
|
|
|
|
result = append(result, map[string]interface{}{
|
|
|
|
"name": strings.ToLower(*i.ParameterName),
|
|
|
|
"value": strings.ToLower(*i.ParameterValue),
|
|
|
|
})
|
|
|
|
}
|
|
|
|
return result
|
|
|
|
}
|
|
|
|
|
2015-06-08 21:05:00 +02:00
|
|
|
// Flattens an array of Parameters into a []map[string]interface{}
|
|
|
|
func flattenElastiCacheParameters(list []*elasticache.Parameter) []map[string]interface{} {
|
|
|
|
result := make([]map[string]interface{}, 0, len(list))
|
|
|
|
for _, i := range list {
|
2016-05-16 19:20:06 +02:00
|
|
|
if i.ParameterValue != nil {
|
|
|
|
result = append(result, map[string]interface{}{
|
|
|
|
"name": strings.ToLower(*i.ParameterName),
|
2016-11-09 12:34:00 +01:00
|
|
|
"value": *i.ParameterValue,
|
2016-05-16 19:20:06 +02:00
|
|
|
})
|
|
|
|
}
|
2015-06-08 21:05:00 +02:00
|
|
|
}
|
|
|
|
return result
|
|
|
|
}
|
|
|
|
|
2014-07-03 01:57:57 +02:00
|
|
|
// Takes the result of flatmap.Expand for an array of strings
|
2015-09-16 00:11:53 +02:00
|
|
|
// and returns a []*string
|
2015-04-16 22:28:18 +02:00
|
|
|
func expandStringList(configured []interface{}) []*string {
|
2015-04-16 22:18:01 +02:00
|
|
|
vs := make([]*string, 0, len(configured))
|
2014-07-03 01:57:57 +02:00
|
|
|
for _, v := range configured {
|
2016-12-15 19:18:57 +01:00
|
|
|
val, ok := v.(string)
|
|
|
|
if ok && val != "" {
|
|
|
|
vs = append(vs, aws.String(v.(string)))
|
|
|
|
}
|
2014-07-03 01:57:57 +02:00
|
|
|
}
|
|
|
|
return vs
|
|
|
|
}
|
2015-03-17 13:42:05 +01:00
|
|
|
|
2016-02-11 09:38:50 +01:00
|
|
|
// Takes the result of schema.Set of strings and returns a []*string
|
|
|
|
func expandStringSet(configured *schema.Set) []*string {
|
|
|
|
return expandStringList(configured.List())
|
|
|
|
}
|
|
|
|
|
2015-09-16 00:11:53 +02:00
|
|
|
// Takes a list of pointers to strings, expands it to an array
// of raw strings and returns a []interface{}
// to keep compatibility with schema.NewSet
|
|
|
|
func flattenStringList(list []*string) []interface{} {
|
|
|
|
vs := make([]interface{}, 0, len(list))
|
|
|
|
for _, v := range list {
|
|
|
|
vs = append(vs, *v)
|
|
|
|
}
|
|
|
|
return vs
|
|
|
|
}
|
|
|
|
|
2015-03-17 13:42:05 +01:00
|
|
|
// Flattens an array of private IP addresses into a []string, where the elements returned are the IP strings, e.g. "192.168.0.0"
|
2015-09-11 20:56:20 +02:00
|
|
|
func flattenNetworkInterfacesPrivateIPAddresses(dtos []*ec2.NetworkInterfacePrivateIpAddress) []string {
|
2015-03-17 13:42:05 +01:00
|
|
|
ips := make([]string, 0, len(dtos))
|
|
|
|
for _, v := range dtos {
|
2015-08-17 20:27:16 +02:00
|
|
|
ip := *v.PrivateIpAddress
|
2015-03-17 13:42:05 +01:00
|
|
|
ips = append(ips, ip)
|
|
|
|
}
|
|
|
|
return ips
|
|
|
|
}
|
|
|
|
|
|
|
|
// Flattens security group identifiers into a []string, where the elements returned are the GroupIDs
|
2015-04-16 22:28:18 +02:00
|
|
|
func flattenGroupIdentifiers(dtos []*ec2.GroupIdentifier) []string {
|
2015-03-17 13:42:05 +01:00
|
|
|
ids := make([]string, 0, len(dtos))
|
|
|
|
for _, v := range dtos {
|
2015-08-17 20:27:16 +02:00
|
|
|
group_id := *v.GroupId
|
2015-03-17 13:42:05 +01:00
|
|
|
ids = append(ids, group_id)
|
|
|
|
}
|
|
|
|
return ids
|
|
|
|
}
|
|
|
|
|
|
|
|
// Expands an array of IPs into an EC2 Private IP Address Spec
|
2015-09-11 20:56:20 +02:00
|
|
|
func expandPrivateIPAddresses(ips []interface{}) []*ec2.PrivateIpAddressSpecification {
|
2015-08-17 20:27:16 +02:00
|
|
|
dtos := make([]*ec2.PrivateIpAddressSpecification, 0, len(ips))
|
2015-03-17 13:42:05 +01:00
|
|
|
for i, v := range ips {
|
2015-08-17 20:27:16 +02:00
|
|
|
new_private_ip := &ec2.PrivateIpAddressSpecification{
|
|
|
|
PrivateIpAddress: aws.String(v.(string)),
|
2015-03-17 14:00:36 +01:00
|
|
|
}
|
|
|
|
|
2015-07-28 22:29:46 +02:00
|
|
|
new_private_ip.Primary = aws.Bool(i == 0)
|
2015-03-17 14:00:36 +01:00
|
|
|
|
2015-03-17 13:42:05 +01:00
|
|
|
dtos = append(dtos, new_private_ip)
|
|
|
|
}
|
|
|
|
return dtos
|
|
|
|
}
|
|
|
|
|
|
|
|
// Flattens a network interface attachment into a map[string]interface{}
|
2015-04-16 22:28:18 +02:00
|
|
|
func flattenAttachment(a *ec2.NetworkInterfaceAttachment) map[string]interface{} {
|
2015-03-17 14:00:36 +01:00
|
|
|
att := make(map[string]interface{})
|
2016-05-24 15:52:38 +02:00
|
|
|
if a.InstanceId != nil {
|
|
|
|
att["instance"] = *a.InstanceId
|
|
|
|
}
|
2015-03-17 13:42:05 +01:00
|
|
|
att["device_index"] = *a.DeviceIndex
|
2015-08-17 20:27:16 +02:00
|
|
|
att["attachment_id"] = *a.AttachmentId
|
2015-03-17 13:42:05 +01:00
|
|
|
return att
|
2015-03-17 14:00:36 +01:00
|
|
|
}
|
func flattenElastiCacheSecurityGroupNames(securityGroups []*elasticache.CacheSecurityGroupMembership) []string {
|
|
|
|
result := make([]string, 0, len(securityGroups))
|
|
|
|
for _, sg := range securityGroups {
|
|
|
|
if sg.CacheSecurityGroupName != nil {
|
|
|
|
result = append(result, *sg.CacheSecurityGroupName)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return result
|
|
|
|
}
|
|
|
|
|
|
|
|
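// Flattens ElastiCache security group memberships into a []string of security group IDs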
func flattenElastiCacheSecurityGroupIds(securityGroups []*elasticache.SecurityGroupMembership) []string {
|
|
|
|
result := make([]string, 0, len(securityGroups))
|
|
|
|
for _, sg := range securityGroups {
|
|
|
|
if sg.SecurityGroupId != nil {
|
|
|
|
result = append(result, *sg.SecurityGroupId)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return result
|
|
|
|
}
|
|
|
|
|
2015-12-12 00:52:02 +01:00
|
|
|
// Flattens step adjustments into a list of map[string]interface.
|
|
|
|
func flattenStepAdjustments(adjustments []*autoscaling.StepAdjustment) []map[string]interface{} {
|
|
|
|
result := make([]map[string]interface{}, 0, len(adjustments))
|
|
|
|
for _, raw := range adjustments {
|
|
|
|
a := map[string]interface{}{
|
|
|
|
"scaling_adjustment": *raw.ScalingAdjustment,
|
|
|
|
}
|
|
|
|
if raw.MetricIntervalUpperBound != nil {
|
|
|
|
a["metric_interval_upper_bound"] = *raw.MetricIntervalUpperBound
|
|
|
|
}
|
|
|
|
if raw.MetricIntervalLowerBound != nil {
|
|
|
|
a["metric_interval_lower_bound"] = *raw.MetricIntervalLowerBound
|
|
|
|
}
|
|
|
|
result = append(result, a)
|
|
|
|
}
|
|
|
|
return result
|
|
|
|
}
|
|
|
|
|
2015-04-16 22:18:01 +02:00
|
|
|
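// Flattens an array of Route 53 ResourceRecords into a []string, stripping the
// surrounding quotes from record values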
func flattenResourceRecords(recs []*route53.ResourceRecord) []string {
|
|
|
|
strs := make([]string, 0, len(recs))
|
|
|
|
for _, r := range recs {
|
|
|
|
if r.Value != nil {
|
|
|
|
s := strings.Replace(*r.Value, "\"", "", 2)
|
|
|
|
strs = append(strs, s)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return strs
|
|
|
|
}
|
|
|
|
|
|
|
|
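// Expands an array of raw record strings into Route 53 ResourceRecords,
// quoting the values of TXT and SPF records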
func expandResourceRecords(recs []interface{}, typeStr string) []*route53.ResourceRecord {
|
|
|
|
records := make([]*route53.ResourceRecord, 0, len(recs))
|
|
|
|
for _, r := range recs {
|
|
|
|
s := r.(string)
|
|
|
|
switch typeStr {
|
2015-11-30 13:01:32 +01:00
|
|
|
case "TXT", "SPF":
|
2015-04-16 22:18:01 +02:00
|
|
|
str := fmt.Sprintf("\"%s\"", s)
|
|
|
|
records = append(records, &route53.ResourceRecord{Value: aws.String(str)})
|
|
|
|
default:
|
|
|
|
records = append(records, &route53.ResourceRecord{Value: aws.String(s)})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return records
|
|
|
|
}
|
2015-08-04 23:24:55 +02:00
|
|
|
|
2015-10-02 00:12:46 +02:00
|
|
|
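// Expands a cluster config map into an elasticsearch.ElasticsearchClusterConfig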
func expandESClusterConfig(m map[string]interface{}) *elasticsearch.ElasticsearchClusterConfig {
|
|
|
|
config := elasticsearch.ElasticsearchClusterConfig{}
|
|
|
|
|
|
|
|
if v, ok := m["dedicated_master_enabled"]; ok {
|
|
|
|
isEnabled := v.(bool)
|
|
|
|
config.DedicatedMasterEnabled = aws.Bool(isEnabled)
|
|
|
|
|
|
|
|
if isEnabled {
|
|
|
|
if v, ok := m["dedicated_master_count"]; ok && v.(int) > 0 {
|
|
|
|
config.DedicatedMasterCount = aws.Int64(int64(v.(int)))
|
|
|
|
}
|
|
|
|
if v, ok := m["dedicated_master_type"]; ok && v.(string) != "" {
|
|
|
|
config.DedicatedMasterType = aws.String(v.(string))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if v, ok := m["instance_count"]; ok {
|
|
|
|
config.InstanceCount = aws.Int64(int64(v.(int)))
|
|
|
|
}
|
|
|
|
if v, ok := m["instance_type"]; ok {
|
|
|
|
config.InstanceType = aws.String(v.(string))
|
|
|
|
}
|
|
|
|
|
|
|
|
if v, ok := m["zone_awareness_enabled"]; ok {
|
|
|
|
config.ZoneAwarenessEnabled = aws.Bool(v.(bool))
|
|
|
|
}
|
|
|
|
|
|
|
|
return &config
|
|
|
|
}
|
|
|
|
|
|
|
|
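// Flattens an elasticsearch.ElasticsearchClusterConfig into a []map[string]interface{}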
func flattenESClusterConfig(c *elasticsearch.ElasticsearchClusterConfig) []map[string]interface{} {
|
|
|
|
m := map[string]interface{}{}
|
|
|
|
|
|
|
|
if c.DedicatedMasterCount != nil {
|
|
|
|
m["dedicated_master_count"] = *c.DedicatedMasterCount
|
|
|
|
}
|
|
|
|
if c.DedicatedMasterEnabled != nil {
|
|
|
|
m["dedicated_master_enabled"] = *c.DedicatedMasterEnabled
|
|
|
|
}
|
|
|
|
if c.DedicatedMasterType != nil {
|
|
|
|
m["dedicated_master_type"] = *c.DedicatedMasterType
|
|
|
|
}
|
|
|
|
if c.InstanceCount != nil {
|
|
|
|
m["instance_count"] = *c.InstanceCount
|
|
|
|
}
|
|
|
|
if c.InstanceType != nil {
|
|
|
|
m["instance_type"] = *c.InstanceType
|
|
|
|
}
|
|
|
|
if c.ZoneAwarenessEnabled != nil {
|
|
|
|
m["zone_awareness_enabled"] = *c.ZoneAwarenessEnabled
|
|
|
|
}
|
|
|
|
|
|
|
|
return []map[string]interface{}{m}
|
|
|
|
}
|
|
|
|
|
|
|
|
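// Flattens elasticsearch.EBSOptions into a []map[string]interface{}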
func flattenESEBSOptions(o *elasticsearch.EBSOptions) []map[string]interface{} {
|
|
|
|
m := map[string]interface{}{}
|
|
|
|
|
|
|
|
if o.EBSEnabled != nil {
|
|
|
|
m["ebs_enabled"] = *o.EBSEnabled
|
|
|
|
}
|
|
|
|
if o.Iops != nil {
|
|
|
|
m["iops"] = *o.Iops
|
|
|
|
}
|
|
|
|
if o.VolumeSize != nil {
|
|
|
|
m["volume_size"] = *o.VolumeSize
|
|
|
|
}
|
|
|
|
if o.VolumeType != nil {
|
|
|
|
m["volume_type"] = *o.VolumeType
|
|
|
|
}
|
|
|
|
|
|
|
|
return []map[string]interface{}{m}
|
|
|
|
}
|
|
|
|
|
|
|
|
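// Expands an EBS options map into an elasticsearch.EBSOptions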
func expandESEBSOptions(m map[string]interface{}) *elasticsearch.EBSOptions {
|
|
|
|
options := elasticsearch.EBSOptions{}
|
|
|
|
|
|
|
|
if v, ok := m["ebs_enabled"]; ok {
|
|
|
|
options.EBSEnabled = aws.Bool(v.(bool))
|
|
|
|
}
|
|
|
|
if v, ok := m["iops"]; ok && v.(int) > 0 {
|
|
|
|
options.Iops = aws.Int64(int64(v.(int)))
|
|
|
|
}
|
|
|
|
if v, ok := m["volume_size"]; ok && v.(int) > 0 {
|
|
|
|
options.VolumeSize = aws.Int64(int64(v.(int)))
|
|
|
|
}
|
|
|
|
if v, ok := m["volume_type"]; ok && v.(string) != "" {
|
|
|
|
options.VolumeType = aws.String(v.(string))
|
|
|
|
}
|
|
|
|
|
|
|
|
return &options
|
|
|
|
}
|
|
|
|
|
2017-02-02 23:32:09 +01:00
|
|
|
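// Expands the first element of the configured recording group list into a
// configservice.RecordingGroup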
func expandConfigRecordingGroup(configured []interface{}) *configservice.RecordingGroup {
|
|
|
|
recordingGroup := configservice.RecordingGroup{}
|
|
|
|
group := configured[0].(map[string]interface{})
|
|
|
|
|
|
|
|
if v, ok := group["all_supported"]; ok {
|
|
|
|
recordingGroup.AllSupported = aws.Bool(v.(bool))
|
|
|
|
}
|
|
|
|
|
|
|
|
if v, ok := group["include_global_resource_types"]; ok {
|
|
|
|
recordingGroup.IncludeGlobalResourceTypes = aws.Bool(v.(bool))
|
|
|
|
}
|
|
|
|
|
|
|
|
if v, ok := group["resource_types"]; ok {
|
|
|
|
recordingGroup.ResourceTypes = expandStringList(v.(*schema.Set).List())
|
|
|
|
}
|
|
|
|
return &recordingGroup
|
|
|
|
}
|
|
|
|
|
|
|
|
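// Flattens a configservice.RecordingGroup into a []map[string]interface{}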
func flattenConfigRecordingGroup(g *configservice.RecordingGroup) []map[string]interface{} {
|
|
|
|
m := make(map[string]interface{}, 1)
|
|
|
|
|
|
|
|
if g.AllSupported != nil {
|
|
|
|
m["all_supported"] = *g.AllSupported
|
|
|
|
}
|
|
|
|
|
|
|
|
if g.IncludeGlobalResourceTypes != nil {
|
|
|
|
m["include_global_resource_types"] = *g.IncludeGlobalResourceTypes
|
|
|
|
}
|
|
|
|
|
|
|
|
if g.ResourceTypes != nil && len(g.ResourceTypes) > 0 {
|
|
|
|
m["resource_types"] = schema.NewSet(schema.HashString, flattenStringList(g.ResourceTypes))
|
|
|
|
}
|
|
|
|
|
|
|
|
return []map[string]interface{}{m}
|
|
|
|
}
|
|
|
|
|
2017-02-02 23:33:55 +01:00
|
|
|
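// Flattens configservice.ConfigSnapshotDeliveryProperties into a []map[string]interface{}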
func flattenConfigSnapshotDeliveryProperties(p *configservice.ConfigSnapshotDeliveryProperties) []map[string]interface{} {
|
|
|
|
m := make(map[string]interface{}, 0)
|
|
|
|
|
|
|
|
if p.DeliveryFrequency != nil {
|
|
|
|
m["delivery_frequency"] = *p.DeliveryFrequency
|
|
|
|
}
|
|
|
|
|
|
|
|
return []map[string]interface{}{m}
|
|
|
|
}
|
|
|
|
|
2015-10-02 00:12:46 +02:00
|
|
|
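// Converts a map of string pointers into a map of plain strings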
func pointersMapToStringList(pointers map[string]*string) map[string]interface{} {
|
|
|
|
list := make(map[string]interface{}, len(pointers))
|
|
|
|
for i, v := range pointers {
|
|
|
|
list[i] = *v
|
|
|
|
}
|
|
|
|
return list
|
|
|
|
}
|
|
|
|
|
|
|
|
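// Converts a map of plain strings into a map of string pointers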
func stringMapToPointers(m map[string]interface{}) map[string]*string {
|
|
|
|
list := make(map[string]*string, len(m))
|
|
|
|
for i, v := range m {
|
|
|
|
list[i] = aws.String(v.(string))
|
|
|
|
}
|
|
|
|
return list
|
|
|
|
}
|
2015-09-13 18:57:58 +02:00
|
|
|
|
|
|
|
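// Flattens Directory Service VPC settings into a []map[string]interface{}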
func flattenDSVpcSettings(
|
|
|
|
s *directoryservice.DirectoryVpcSettingsDescription) []map[string]interface{} {
|
|
|
|
settings := make(map[string]interface{}, 0)
|
|
|
|
|
2015-12-18 22:42:54 +01:00
|
|
|
if s == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
settings["subnet_ids"] = schema.NewSet(schema.HashString, flattenStringList(s.SubnetIds))
|
|
|
|
settings["vpc_id"] = *s.VpcId
|
|
|
|
|
|
|
|
return []map[string]interface{}{settings}
|
|
|
|
}
|
|
|
|
|
2016-11-23 17:36:16 +01:00
|
|
|
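// Flattens a Lambda environment response into a []interface{} holding its variables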
func flattenLambdaEnvironment(lambdaEnv *lambda.EnvironmentResponse) []interface{} {
|
2016-11-21 23:52:14 +01:00
|
|
|
envs := make(map[string]interface{})
|
|
|
|
en := make(map[string]string)
|
2016-11-23 17:36:16 +01:00
|
|
|
|
|
|
|
if lambdaEnv == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
for k, v := range lambdaEnv.Variables {
|
2016-11-21 23:52:14 +01:00
|
|
|
en[k] = *v
|
|
|
|
}
|
|
|
|
if len(en) > 0 {
|
|
|
|
envs["variables"] = en
|
|
|
|
}
|
|
|
|
|
|
|
|
return []interface{}{envs}
|
|
|
|
}
|
|
|
|
|
2016-02-16 06:38:17 +01:00
|
|
|
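// Flattens a Lambda VPC config response into a []map[string]interface{},
// returning nil when no VPC configuration is present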
func flattenLambdaVpcConfigResponse(s *lambda.VpcConfigResponse) []map[string]interface{} {
|
|
|
|
settings := make(map[string]interface{}, 0)
|
|
|
|
|
|
|
|
if s == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-11-21 23:52:14 +01:00
|
|
|
var emptyVpc bool
|
|
|
|
if s.VpcId == nil || *s.VpcId == "" {
|
|
|
|
emptyVpc = true
|
|
|
|
}
|
|
|
|
if len(s.SubnetIds) == 0 && len(s.SecurityGroupIds) == 0 && emptyVpc {
|
2016-02-23 21:56:46 +01:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-02-16 06:38:17 +01:00
|
|
|
settings["subnet_ids"] = schema.NewSet(schema.HashString, flattenStringList(s.SubnetIds))
|
|
|
|
settings["security_group_ids"] = schema.NewSet(schema.HashString, flattenStringList(s.SecurityGroupIds))
|
2016-02-18 22:45:32 +01:00
|
|
|
if s.VpcId != nil {
|
|
|
|
settings["vpc_id"] = *s.VpcId
|
|
|
|
}
|
2016-02-16 06:38:17 +01:00
|
|
|
|
|
|
|
return []map[string]interface{}{settings}
|
|
|
|
}
|
|
|
|
|
2015-12-18 22:42:54 +01:00
|
|
|
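// Flattens Directory Service connect settings into a []map[string]interface{}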
func flattenDSConnectSettings(
|
|
|
|
customerDnsIps []*string,
|
|
|
|
s *directoryservice.DirectoryConnectSettingsDescription) []map[string]interface{} {
|
|
|
|
if s == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
settings := make(map[string]interface{}, 0)
|
|
|
|
|
|
|
|
settings["customer_dns_ips"] = schema.NewSet(schema.HashString, flattenStringList(customerDnsIps))
|
|
|
|
settings["connect_ips"] = schema.NewSet(schema.HashString, flattenStringList(s.ConnectIps))
|
|
|
|
settings["customer_username"] = *s.CustomerUserName
|
2015-09-13 18:57:58 +02:00
|
|
|
settings["subnet_ids"] = schema.NewSet(schema.HashString, flattenStringList(s.SubnetIds))
|
|
|
|
settings["vpc_id"] = *s.VpcId
|
|
|
|
|
|
|
|
return []map[string]interface{}{settings}
|
|
|
|
}
|
2015-07-07 09:00:05 +02:00
|
|
|
|
|
|
|
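// Expands a map of CloudFormation parameters into []*cloudformation.Parameter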
func expandCloudFormationParameters(params map[string]interface{}) []*cloudformation.Parameter {
|
|
|
|
var cfParams []*cloudformation.Parameter
|
|
|
|
for k, v := range params {
|
|
|
|
cfParams = append(cfParams, &cloudformation.Parameter{
|
|
|
|
ParameterKey: aws.String(k),
|
|
|
|
ParameterValue: aws.String(v.(string)),
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
return cfParams
|
|
|
|
}
|
|
|
|
|
|
|
|
// flattenCloudFormationParameters is flattening list of
|
|
|
|
// *cloudformation.Parameters and only returning existing
|
|
|
|
// parameters to avoid clash with default values
|
|
|
|
func flattenCloudFormationParameters(cfParams []*cloudformation.Parameter,
|
|
|
|
originalParams map[string]interface{}) map[string]interface{} {
|
|
|
|
params := make(map[string]interface{}, len(cfParams))
|
|
|
|
for _, p := range cfParams {
|
|
|
|
_, isConfigured := originalParams[*p.ParameterKey]
|
|
|
|
if isConfigured {
|
|
|
|
params[*p.ParameterKey] = *p.ParameterValue
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return params
|
|
|
|
}
|
|
|
|
|
2016-06-02 11:08:12 +02:00
|
|
|
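// flattenAllCloudFormationParameters flattens every CloudFormation parameter,
// including those not set in the original configuration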
func flattenAllCloudFormationParameters(cfParams []*cloudformation.Parameter) map[string]interface{} {
|
|
|
|
params := make(map[string]interface{}, len(cfParams))
|
|
|
|
for _, p := range cfParams {
|
|
|
|
params[*p.ParameterKey] = *p.ParameterValue
|
|
|
|
}
|
|
|
|
return params
|
|
|
|
}
|
|
|
|
|
2015-07-07 09:00:05 +02:00
|
|
|
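// Expands a map of tags into []*cloudformation.Tag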
func expandCloudFormationTags(tags map[string]interface{}) []*cloudformation.Tag {
|
|
|
|
var cfTags []*cloudformation.Tag
|
|
|
|
for k, v := range tags {
|
|
|
|
cfTags = append(cfTags, &cloudformation.Tag{
|
|
|
|
Key: aws.String(k),
|
|
|
|
Value: aws.String(v.(string)),
|
|
|
|
})
|
|
|
|
}
|
|
|
|
return cfTags
|
|
|
|
}
|
|
|
|
|
|
|
|
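// Flattens []*cloudformation.Tag into a map of tag keys to values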
func flattenCloudFormationTags(cfTags []*cloudformation.Tag) map[string]string {
|
|
|
|
tags := make(map[string]string, len(cfTags))
|
|
|
|
for _, t := range cfTags {
|
|
|
|
tags[*t.Key] = *t.Value
|
|
|
|
}
|
|
|
|
return tags
|
|
|
|
}
|
|
|
|
|
|
|
|
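// Flattens []*cloudformation.Output into a map of output keys to values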
func flattenCloudFormationOutputs(cfOutputs []*cloudformation.Output) map[string]string {
|
|
|
|
outputs := make(map[string]string, len(cfOutputs))
|
|
|
|
for _, o := range cfOutputs {
|
|
|
|
outputs[*o.OutputKey] = *o.OutputValue
|
|
|
|
}
|
|
|
|
return outputs
|
|
|
|
}
|
2016-01-15 11:29:15 +01:00
|
|
|
|
2016-11-21 16:02:20 +01:00
|
|
|
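// Flattens an array of SuspendedProcesses into a []string of process names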
func flattenAsgSuspendedProcesses(list []*autoscaling.SuspendedProcess) []string {
|
|
|
|
strs := make([]string, 0, len(list))
|
|
|
|
for _, r := range list {
|
|
|
|
if r.ProcessName != nil {
|
|
|
|
strs = append(strs, *r.ProcessName)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return strs
|
|
|
|
}
|
|
|
|
|
2016-01-15 11:29:15 +01:00
|
|
|
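// Flattens an array of EnabledMetrics into a []string of metric names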
func flattenAsgEnabledMetrics(list []*autoscaling.EnabledMetric) []string {
|
|
|
|
strs := make([]string, 0, len(list))
|
|
|
|
for _, r := range list {
|
|
|
|
if r.Metric != nil {
|
|
|
|
strs = append(strs, *r.Metric)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return strs
|
|
|
|
}
|
2016-03-05 23:19:25 +01:00
|
|
|
|
2016-07-22 00:37:58 +02:00
|
|
|
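// Flattens the shard-level metrics of the first Kinesis EnhancedMetrics entry
// into a []string of metric names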
func flattenKinesisShardLevelMetrics(list []*kinesis.EnhancedMetrics) []string {
|
|
|
|
if len(list) == 0 {
|
|
|
|
return []string{}
|
|
|
|
}
|
|
|
|
strs := make([]string, 0, len(list[0].ShardLevelMetrics))
|
|
|
|
for _, s := range list[0].ShardLevelMetrics {
|
|
|
|
strs = append(strs, *s)
|
|
|
|
}
|
|
|
|
return strs
|
|
|
|
}
|
|
|
|
|
2016-07-06 12:21:47 +02:00
|
|
|
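// Flattens API Gateway stage keys of the form "<rest_api_id>/<stage_name>" into a
// []map[string]interface{} with rest_api_id and stage_name entries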
func flattenApiGatewayStageKeys(keys []*string) []map[string]interface{} {
|
2016-07-11 23:10:13 +02:00
|
|
|
stageKeys := make([]map[string]interface{}, 0, len(keys))
|
|
|
|
for _, o := range keys {
|
|
|
|
key := make(map[string]interface{})
|
|
|
|
parts := strings.Split(*o, "/")
|
|
|
|
key["stage_name"] = parts[1]
|
|
|
|
key["rest_api_id"] = parts[0]
|
|
|
|
|
|
|
|
stageKeys = append(stageKeys, key)
|
|
|
|
}
|
|
|
|
return stageKeys
|
2016-07-06 12:21:47 +02:00
|
|
|
}
|
|
|
|
|
2016-03-05 23:19:25 +01:00
|
|
|
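// Expands the stage_key set from the resource data into []*apigateway.StageKey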
func expandApiGatewayStageKeys(d *schema.ResourceData) []*apigateway.StageKey {
|
|
|
|
var stageKeys []*apigateway.StageKey
|
|
|
|
|
|
|
|
if stageKeyData, ok := d.GetOk("stage_key"); ok {
|
|
|
|
params := stageKeyData.(*schema.Set).List()
|
|
|
|
for k := range params {
|
|
|
|
data := params[k].(map[string]interface{})
|
|
|
|
stageKeys = append(stageKeys, &apigateway.StageKey{
|
|
|
|
RestApiId: aws.String(data["rest_api_id"].(string)),
|
|
|
|
StageName: aws.String(data["stage_name"].(string)),
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return stageKeys
|
|
|
|
}
|
|
|
|
|
2016-03-05 23:16:16 +01:00
|
|
|
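// Builds the PATCH operations needed to reconcile the old and new request/response
// model maps of an API Gateway method: keys only in the old map become "remove"
// operations, keys present in both become "replace", and new keys become "add"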
func expandApiGatewayRequestResponseModelOperations(d *schema.ResourceData, key string, prefix string) []*apigateway.PatchOperation {
|
|
|
|
operations := make([]*apigateway.PatchOperation, 0)
|
|
|
|
|
|
|
|
oldModels, newModels := d.GetChange(key)
|
|
|
|
oldModelMap := oldModels.(map[string]interface{})
|
|
|
|
newModelMap := newModels.(map[string]interface{})
|
|
|
|
|
|
|
|
for k := range oldModelMap {
|
|
|
|
operation := apigateway.PatchOperation{
|
|
|
|
Op: aws.String("remove"),
|
|
|
|
Path: aws.String(fmt.Sprintf("/%s/%s", prefix, strings.Replace(k, "/", "~1", -1))),
|
|
|
|
}
|
|
|
|
|
|
|
|
for nK, nV := range newModelMap {
|
|
|
|
if nK == k {
|
|
|
|
operation.Op = aws.String("replace")
|
|
|
|
operation.Value = aws.String(nV.(string))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
operations = append(operations, &operation)
|
|
|
|
}
|
|
|
|
|
|
|
|
for nK, nV := range newModelMap {
|
|
|
|
exists := false
|
|
|
|
for k := range oldModelMap {
|
|
|
|
if k == nK {
|
|
|
|
exists = true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if !exists {
|
|
|
|
operation := apigateway.PatchOperation{
|
|
|
|
Op: aws.String("add"),
|
|
|
|
Path: aws.String(fmt.Sprintf("/%s/%s", prefix, strings.Replace(nK, "/", "~1", -1))),
|
|
|
|
Value: aws.String(nV.(string)),
|
|
|
|
}
|
|
|
|
operations = append(operations, &operation)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return operations
|
|
|
|
}
|
|
|
|
|
2016-08-11 12:49:59 +02:00
|
|
|
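// Deprecated variant that builds the method parameter PATCH operations from the
// old JSON-encoded parameter maps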
func deprecatedExpandApiGatewayMethodParametersJSONOperations(d *schema.ResourceData, key string, prefix string) ([]*apigateway.PatchOperation, error) {
|
2016-05-04 12:56:18 +02:00
|
|
|
operations := make([]*apigateway.PatchOperation, 0)
|
|
|
|
oldParameters, newParameters := d.GetChange(key)
|
|
|
|
oldParametersMap := make(map[string]interface{})
|
|
|
|
newParametersMap := make(map[string]interface{})
|
|
|
|
|
|
|
|
if err := json.Unmarshal([]byte(oldParameters.(string)), &oldParametersMap); err != nil {
|
2016-05-05 22:14:51 +02:00
|
|
|
err := fmt.Errorf("Error unmarshaling old %s: %s", key, err)
|
2016-05-04 12:56:18 +02:00
|
|
|
return operations, err
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := json.Unmarshal([]byte(newParameters.(string)), &newParametersMap); err != nil {
|
2016-05-05 22:14:51 +02:00
|
|
|
err := fmt.Errorf("Error unmarshaling new %s: %s", key, err)
|
2016-05-04 12:56:18 +02:00
|
|
|
return operations, err
|
|
|
|
}
|
|
|
|
|
|
|
|
for k := range oldParametersMap {
|
|
|
|
operation := apigateway.PatchOperation{
|
|
|
|
Op: aws.String("remove"),
|
|
|
|
Path: aws.String(fmt.Sprintf("/%s/%s", prefix, k)),
|
|
|
|
}
|
|
|
|
|
|
|
|
for nK, nV := range newParametersMap {
|
|
|
|
if nK == k {
|
|
|
|
operation.Op = aws.String("replace")
|
|
|
|
operation.Value = aws.String(strconv.FormatBool(nV.(bool)))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
operations = append(operations, &operation)
|
|
|
|
}
|
|
|
|
|
|
|
|
for nK, nV := range newParametersMap {
|
|
|
|
exists := false
|
|
|
|
for k := range oldParametersMap {
|
|
|
|
if k == nK {
|
|
|
|
exists = true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if !exists {
|
|
|
|
operation := apigateway.PatchOperation{
|
|
|
|
Op: aws.String("add"),
|
|
|
|
Path: aws.String(fmt.Sprintf("/%s/%s", prefix, nK)),
|
|
|
|
Value: aws.String(strconv.FormatBool(nV.(bool))),
|
|
|
|
}
|
|
|
|
operations = append(operations, &operation)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return operations, nil
|
|
|
|
}
|
|
|
|
|
2016-08-11 12:49:59 +02:00
|
|
|
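// Builds the PATCH operations needed to reconcile the old and new method
// parameter maps of an API Gateway method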
func expandApiGatewayMethodParametersOperations(d *schema.ResourceData, key string, prefix string) ([]*apigateway.PatchOperation, error) {
|
|
|
|
operations := make([]*apigateway.PatchOperation, 0)
|
|
|
|
|
|
|
|
oldParameters, newParameters := d.GetChange(key)
|
|
|
|
oldParametersMap := oldParameters.(map[string]interface{})
|
|
|
|
newParametersMap := newParameters.(map[string]interface{})
|
|
|
|
|
|
|
|
for k := range oldParametersMap {
|
|
|
|
operation := apigateway.PatchOperation{
|
|
|
|
Op: aws.String("remove"),
|
|
|
|
Path: aws.String(fmt.Sprintf("/%s/%s", prefix, k)),
|
|
|
|
}
|
|
|
|
|
|
|
|
for nK, nV := range newParametersMap {
|
|
|
|
b, ok := nV.(bool)
|
|
|
|
if !ok {
|
|
|
|
value, _ := strconv.ParseBool(nV.(string))
|
|
|
|
b = value
|
|
|
|
}
|
|
|
|
if nK == k {
|
|
|
|
operation.Op = aws.String("replace")
|
|
|
|
operation.Value = aws.String(strconv.FormatBool(b))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
operations = append(operations, &operation)
|
|
|
|
}
|
|
|
|
|
|
|
|
for nK, nV := range newParametersMap {
|
|
|
|
exists := false
|
|
|
|
for k := range oldParametersMap {
|
|
|
|
if k == nK {
|
|
|
|
exists = true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if !exists {
|
|
|
|
b, ok := nV.(bool)
|
|
|
|
if !ok {
|
|
|
|
value, _ := strconv.ParseBool(nV.(string))
|
|
|
|
b = value
|
|
|
|
}
|
|
|
|
operation := apigateway.PatchOperation{
|
|
|
|
Op: aws.String("add"),
|
|
|
|
Path: aws.String(fmt.Sprintf("/%s/%s", prefix, nK)),
|
|
|
|
Value: aws.String(strconv.FormatBool(b)),
|
|
|
|
}
|
|
|
|
operations = append(operations, &operation)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return operations, nil
|
|
|
|
}
|
|
|
|
|
2016-03-05 23:19:25 +01:00
|
|
|
func expandApiGatewayStageKeyOperations(d *schema.ResourceData) []*apigateway.PatchOperation {
|
|
|
|
operations := make([]*apigateway.PatchOperation, 0)
|
|
|
|
|
|
|
|
prev, curr := d.GetChange("stage_key")
|
|
|
|
prevList := prev.(*schema.Set).List()
|
|
|
|
currList := curr.(*schema.Set).List()
|
|
|
|
|
|
|
|
for i := range prevList {
|
|
|
|
p := prevList[i].(map[string]interface{})
|
|
|
|
exists := false
|
|
|
|
|
|
|
|
for j := range currList {
|
|
|
|
c := currList[j].(map[string]interface{})
|
|
|
|
if c["rest_api_id"].(string) == p["rest_api_id"].(string) && c["stage_name"].(string) == p["stage_name"].(string) {
|
|
|
|
exists = true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if !exists {
|
|
|
|
operations = append(operations, &apigateway.PatchOperation{
|
|
|
|
Op: aws.String("remove"),
|
|
|
|
Path: aws.String("/stages"),
|
|
|
|
Value: aws.String(fmt.Sprintf("%s/%s", p["rest_api_id"].(string), p["stage_name"].(string))),
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for i := range currList {
|
|
|
|
c := currList[i].(map[string]interface{})
|
|
|
|
exists := false
|
|
|
|
|
|
|
|
for j := range prevList {
|
|
|
|
p := prevList[j].(map[string]interface{})
|
|
|
|
if c["rest_api_id"].(string) == p["rest_api_id"].(string) && c["stage_name"].(string) == p["stage_name"].(string) {
|
|
|
|
exists = true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if !exists {
|
|
|
|
operations = append(operations, &apigateway.PatchOperation{
|
|
|
|
Op: aws.String("add"),
|
|
|
|
Path: aws.String("/stages"),
|
|
|
|
Value: aws.String(fmt.Sprintf("%s/%s", c["rest_api_id"].(string), c["stage_name"].(string))),
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return operations
|
|
|
|
}
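// A minimal sketch (illustrative only, the identifiers are made up) of the
// operations emitted by expandApiGatewayStageKeyOperations when a stage key
// is swapped in the configuration:
//
//	prev stage_key: [{rest_api_id: "a1b2c3", stage_name: "dev"}]
//	curr stage_key: [{rest_api_id: "a1b2c3", stage_name: "prod"}]
//
// yields:
//
//	{Op: "remove", Path: "/stages", Value: "a1b2c3/dev"}
//	{Op: "add",    Path: "/stages", Value: "a1b2c3/prod"}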
|
2016-02-29 14:07:45 +01:00
|
|
|
|
|
|
|
func expandCloudWachLogMetricTransformations(m map[string]interface{}) []*cloudwatchlogs.MetricTransformation {
|
|
|
|
transformation := cloudwatchlogs.MetricTransformation{
|
|
|
|
MetricName: aws.String(m["name"].(string)),
|
|
|
|
MetricNamespace: aws.String(m["namespace"].(string)),
|
|
|
|
MetricValue: aws.String(m["value"].(string)),
|
|
|
|
}
|
|
|
|
|
|
|
|
return []*cloudwatchlogs.MetricTransformation{&transformation}
|
|
|
|
}
|
|
|
|
|
|
|
|
func flattenCloudWachLogMetricTransformations(ts []*cloudwatchlogs.MetricTransformation) map[string]string {
|
|
|
|
m := make(map[string]string, 0)
|
|
|
|
|
|
|
|
m["name"] = *ts[0].MetricName
|
|
|
|
m["namespace"] = *ts[0].MetricNamespace
|
|
|
|
m["value"] = *ts[0].MetricValue
|
|
|
|
|
|
|
|
return m
|
|
|
|
}
|
2016-03-28 21:11:25 +02:00
|
|
|
|
|
|
|
func flattenBeanstalkAsg(list []*elasticbeanstalk.AutoScalingGroup) []string {
|
|
|
|
strs := make([]string, 0, len(list))
|
|
|
|
for _, r := range list {
|
|
|
|
if r.Name != nil {
|
|
|
|
strs = append(strs, *r.Name)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return strs
|
|
|
|
}
|
|
|
|
|
|
|
|
func flattenBeanstalkInstances(list []*elasticbeanstalk.Instance) []string {
|
|
|
|
strs := make([]string, 0, len(list))
|
|
|
|
for _, r := range list {
|
|
|
|
if r.Id != nil {
|
|
|
|
strs = append(strs, *r.Id)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return strs
|
|
|
|
}
|
|
|
|
|
|
|
|
func flattenBeanstalkLc(list []*elasticbeanstalk.LaunchConfiguration) []string {
|
|
|
|
strs := make([]string, 0, len(list))
|
|
|
|
for _, r := range list {
|
|
|
|
if r.Name != nil {
|
|
|
|
strs = append(strs, *r.Name)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return strs
|
|
|
|
}
|
|
|
|
|
|
|
|
func flattenBeanstalkElb(list []*elasticbeanstalk.LoadBalancer) []string {
|
|
|
|
strs := make([]string, 0, len(list))
|
|
|
|
for _, r := range list {
|
|
|
|
if r.Name != nil {
|
|
|
|
strs = append(strs, *r.Name)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return strs
|
|
|
|
}
|
|
|
|
|
|
|
|
func flattenBeanstalkSqs(list []*elasticbeanstalk.Queue) []string {
|
|
|
|
strs := make([]string, 0, len(list))
|
|
|
|
for _, r := range list {
|
|
|
|
if r.URL != nil {
|
|
|
|
strs = append(strs, *r.URL)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return strs
|
|
|
|
}
|
|
|
|
|
|
|
|
func flattenBeanstalkTrigger(list []*elasticbeanstalk.Trigger) []string {
|
|
|
|
strs := make([]string, 0, len(list))
|
|
|
|
for _, r := range list {
|
|
|
|
if r.Name != nil {
|
|
|
|
strs = append(strs, *r.Name)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return strs
|
|
|
|
}
|
2016-04-14 21:55:11 +02:00
|
|
|
|
|
|
|
// There are several parts of the AWS API that will sort lists of strings,
|
2016-09-12 08:14:24 +02:00
|
|
|
// causing diffs between resources that use lists. This avoids a bit of
|
2016-04-14 21:55:11 +02:00
|
|
|
// code duplication for pre-sorts that can be used for things like hash
|
|
|
|
// functions, etc.
|
|
|
|
func sortInterfaceSlice(in []interface{}) []interface{} {
|
|
|
|
a := []string{}
|
|
|
|
b := []interface{}{}
|
|
|
|
for _, v := range in {
|
|
|
|
a = append(a, v.(string))
|
|
|
|
}
|
|
|
|
|
|
|
|
sort.Strings(a)
|
|
|
|
|
|
|
|
for _, v := range a {
|
|
|
|
b = append(b, v)
|
|
|
|
}
|
|
|
|
|
|
|
|
return b
|
|
|
|
}
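// A minimal usage sketch (illustrative only): pre-sorting a list attribute
// before hashing or comparing it, so that server-side reordering does not
// show up as a diff:
//
//	raw := []interface{}{"subnet-b", "subnet-a"}
//	sorted := sortInterfaceSlice(raw) // []interface{}{"subnet-a", "subnet-b"}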
|
2016-04-27 14:08:59 +02:00
|
|
|
|
2017-03-27 19:25:43 +02:00
|
|
|
// sortListBasedonTFFile sorts the given list to match the order of the
// corresponding list attribute as written in the Terraform configuration file.
|
|
|
|
func sortListBasedonTFFile(in []string, d *schema.ResourceData, listName string) ([]string, error) {
|
|
|
|
if attributeCount, ok := d.Get(listName + ".#").(int); ok {
|
|
|
|
for i := 0; i < attributeCount; i++ {
|
|
|
|
currAttributeId := d.Get(listName + "." + strconv.Itoa(i))
|
|
|
|
for j := 0; j < len(in); j++ {
|
|
|
|
if currAttributeId == in[j] {
|
|
|
|
in[i], in[j] = in[j], in[i]
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return in, nil
|
|
|
|
}
|
|
|
|
return in, fmt.Errorf("Could not find list: %s", listName)
|
|
|
|
}
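// A minimal sketch (illustrative only, the attribute name is hypothetical):
// re-ordering an API response to match the order written in the configuration,
// assuming a list attribute "zone_ids" configured as ["z-2", "z-1"]:
//
//	ordered, err := sortListBasedonTFFile([]string{"z-1", "z-2"}, d, "zone_ids")
//	// ordered == []string{"z-2", "z-1"}; err is non-nil only if "zone_ids" is absent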
|
|
|
|
|
2016-04-27 14:08:59 +02:00
|
|
|
func flattenApiGatewayThrottleSettings(settings *apigateway.ThrottleSettings) []map[string]interface{} {
|
|
|
|
result := make([]map[string]interface{}, 0, 1)
|
|
|
|
|
|
|
|
if settings != nil {
|
|
|
|
r := make(map[string]interface{})
|
|
|
|
if settings.BurstLimit != nil {
|
|
|
|
r["burst_limit"] = *settings.BurstLimit
|
|
|
|
}
|
|
|
|
|
|
|
|
if settings.RateLimit != nil {
|
|
|
|
r["rate_limit"] = *settings.RateLimit
|
|
|
|
}
|
|
|
|
|
|
|
|
result = append(result, r)
|
|
|
|
}
|
|
|
|
|
|
|
|
return result
|
|
|
|
}
|
2016-05-27 00:29:42 +02:00
|
|
|
|
|
|
|
// TODO: refactor some of these helper functions and types in the terraform/helper packages
|
|
|
|
|
|
|
|
// getStringPtr returns a *string version of the value taken from m, where m
|
|
|
|
// can be a map[string]interface{} or a *schema.ResourceData. If the key isn't
|
|
|
|
// present or is empty, getStringPtr returns nil.
|
|
|
|
func getStringPtr(m interface{}, key string) *string {
|
|
|
|
switch m := m.(type) {
|
|
|
|
case map[string]interface{}:
|
|
|
|
v := m[key]
|
|
|
|
|
|
|
|
if v == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
s := v.(string)
|
|
|
|
if s == "" {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return &s
|
|
|
|
|
|
|
|
case *schema.ResourceData:
|
|
|
|
if v, ok := m.GetOk(key); ok {
|
|
|
|
if v == nil || v.(string) == "" {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
s := v.(string)
|
|
|
|
return &s
|
|
|
|
}
|
|
|
|
|
|
|
|
default:
|
|
|
|
panic("unknown type in getStringPtr")
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
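// A minimal usage sketch (illustrative only): building SDK input where empty
// or missing configuration values should stay nil instead of being sent as
// empty strings:
//
//	m := map[string]interface{}{"description": "primary", "comment": ""}
//	desc := getStringPtr(m, "description") // *string pointing at "primary"
//	cmnt := getStringPtr(m, "comment")     // nil, the empty value is dropped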
|
|
|
|
|
|
|
|
// getStringPtrList returns a []*string version of the map value. If the key
|
|
|
|
// isn't present, getStringPtrList returns nil.
|
|
|
|
func getStringPtrList(m map[string]interface{}, key string) []*string {
|
|
|
|
if v, ok := m[key]; ok {
|
|
|
|
var stringList []*string
|
|
|
|
for _, i := range v.([]interface{}) {
|
|
|
|
s := i.(string)
|
|
|
|
stringList = append(stringList, &s)
|
|
|
|
}
|
|
|
|
|
|
|
|
return stringList
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// setMap is a convenience wrapper around the map[string]interface{} used by schema.Set.
|
|
|
|
// Set operations only alter the underlying map if the value is not nil
|
|
|
|
type setMap map[string]interface{}
|
|
|
|
|
|
|
|
// SetString sets m[key] = *value only if `value != nil`
|
|
|
|
func (s setMap) SetString(key string, value *string) {
|
|
|
|
if value == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
s[key] = *value
|
|
|
|
}
|
|
|
|
|
|
|
|
// SetStringMap sets key to value as a map[string]interface{}, stripping any nil
|
|
|
|
// values. The value parameter can be a map[string]interface{}, a
|
|
|
|
// map[string]*string, or a map[string]string.
|
|
|
|
func (s setMap) SetStringMap(key string, value interface{}) {
|
|
|
|
// because these methods are meant to be chained without intermediate
|
|
|
|
// checks for nil, we are likely to get interfaces with dynamic types but
|
|
|
|
// a nil value.
|
|
|
|
if reflect.ValueOf(value).IsNil() {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
m := make(map[string]interface{})
|
|
|
|
|
|
|
|
switch value := value.(type) {
|
|
|
|
case map[string]string:
|
|
|
|
for k, v := range value {
|
|
|
|
m[k] = v
|
|
|
|
}
|
|
|
|
case map[string]*string:
|
|
|
|
for k, v := range value {
|
|
|
|
if v == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
m[k] = *v
|
|
|
|
}
|
|
|
|
case map[string]interface{}:
|
|
|
|
for k, v := range value {
|
|
|
|
if v == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
switch v := v.(type) {
|
|
|
|
case string:
|
|
|
|
m[k] = v
|
|
|
|
case *string:
|
|
|
|
if v != nil {
|
|
|
|
m[k] = *v
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
panic(fmt.Sprintf("unknown type for SetString: %T", v))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// catch the case where the interface wasn't nil, but we had no non-nil values
|
|
|
|
if len(m) > 0 {
|
|
|
|
s[key] = m
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Set assigns value to s[key] if value isn't nil
|
|
|
|
func (s setMap) Set(key string, value interface{}) {
|
|
|
|
if reflect.ValueOf(value).IsNil() {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
s[key] = value
|
|
|
|
}
|
|
|
|
|
|
|
|
// Map returns the raw map type for a shorter type conversion
|
|
|
|
func (s setMap) Map() map[string]interface{} {
|
|
|
|
return map[string]interface{}(s)
|
|
|
|
}
|
|
|
|
|
|
|
|
// MapList returns the map[string]interface{} as a single element in a slice to
|
|
|
|
// match the schema.Set data type used for structs.
|
|
|
|
func (s setMap) MapList() []map[string]interface{} {
|
|
|
|
return []map[string]interface{}{s.Map()}
|
|
|
|
}
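// A minimal usage sketch (illustrative only, apiObject and its fields are
// hypothetical): flattening an SDK struct into the shape expected by a
// schema.Set without intermediate nil checks:
//
//	s := make(setMap)
//	s.SetString("name", apiObject.Name)    // skipped entirely when Name is nil
//	s.SetStringMap("tags", apiObject.Tags) // nil entries in the map are dropped
//	return s.MapList()                     // single-element slice for a set of structs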
|
2016-03-15 05:13:44 +01:00
|
|
|
|
|
|
|
// Takes the result of flatmap.Expand for an array of policy attributes and
|
|
|
|
// returns ELB API compatible objects
|
2016-03-15 08:20:50 +01:00
|
|
|
func expandPolicyAttributes(configured []interface{}) ([]*elb.PolicyAttribute, error) {
|
2016-03-15 05:13:44 +01:00
|
|
|
attributes := make([]*elb.PolicyAttribute, 0, len(configured))
|
|
|
|
|
|
|
|
// Loop over our configured attributes and create
|
|
|
|
// an array of aws-sdk-go compatible objects
|
|
|
|
for _, lRaw := range configured {
|
|
|
|
data := lRaw.(map[string]interface{})
|
|
|
|
|
|
|
|
a := &elb.PolicyAttribute{
|
|
|
|
AttributeName: aws.String(data["name"].(string)),
|
|
|
|
AttributeValue: aws.String(data["value"].(string)),
|
|
|
|
}
|
|
|
|
|
|
|
|
attributes = append(attributes, a)
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2016-03-15 08:20:50 +01:00
|
|
|
return attributes, nil
|
2016-03-15 05:13:44 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Flattens an array of PolicyAttributes into a []interface{}
|
|
|
|
func flattenPolicyAttributes(list []*elb.PolicyAttributeDescription) []interface{} {
|
|
|
|
attributes := []interface{}{}
|
|
|
|
for _, attrdef := range list {
|
|
|
|
attribute := map[string]string{
|
|
|
|
"name": *attrdef.AttributeName,
|
|
|
|
"value": *attrdef.AttributeValue,
|
|
|
|
}
|
|
|
|
|
|
|
|
attributes = append(attributes, attribute)
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
return attributes
|
|
|
|
}
|
2016-09-17 20:50:38 +02:00
|
|
|
|
2017-02-02 23:29:13 +01:00
|
|
|
func flattenConfigRuleSource(source *configservice.Source) []interface{} {
|
|
|
|
var result []interface{}
|
|
|
|
m := make(map[string]interface{})
|
|
|
|
m["owner"] = *source.Owner
|
|
|
|
m["source_identifier"] = *source.SourceIdentifier
|
|
|
|
if len(source.SourceDetails) > 0 {
|
|
|
|
m["source_detail"] = schema.NewSet(configRuleSourceDetailsHash, flattenConfigRuleSourceDetails(source.SourceDetails))
|
|
|
|
}
|
|
|
|
result = append(result, m)
|
|
|
|
return result
|
|
|
|
}
|
|
|
|
|
|
|
|
func flattenConfigRuleSourceDetails(details []*configservice.SourceDetail) []interface{} {
|
|
|
|
var items []interface{}
|
|
|
|
for _, d := range details {
|
|
|
|
m := make(map[string]interface{})
|
|
|
|
if d.MessageType != nil {
|
|
|
|
m["message_type"] = *d.MessageType
|
|
|
|
}
|
|
|
|
if d.EventSource != nil {
|
|
|
|
m["event_source"] = *d.EventSource
|
|
|
|
}
|
|
|
|
if d.MaximumExecutionFrequency != nil {
|
|
|
|
m["maximum_execution_frequency"] = *d.MaximumExecutionFrequency
|
|
|
|
}
|
|
|
|
|
|
|
|
items = append(items, m)
|
|
|
|
}
|
|
|
|
|
|
|
|
return items
|
|
|
|
}
|
|
|
|
|
|
|
|
func expandConfigRuleSource(configured []interface{}) *configservice.Source {
|
|
|
|
cfg := configured[0].(map[string]interface{})
|
|
|
|
source := configservice.Source{
|
|
|
|
Owner: aws.String(cfg["owner"].(string)),
|
|
|
|
SourceIdentifier: aws.String(cfg["source_identifier"].(string)),
|
|
|
|
}
|
|
|
|
if details, ok := cfg["source_detail"]; ok {
|
|
|
|
source.SourceDetails = expandConfigRuleSourceDetails(details.(*schema.Set))
|
|
|
|
}
|
|
|
|
return &source
|
|
|
|
}
|
|
|
|
|
|
|
|
func expandConfigRuleSourceDetails(configured *schema.Set) []*configservice.SourceDetail {
|
|
|
|
var results []*configservice.SourceDetail
|
|
|
|
|
|
|
|
for _, item := range configured.List() {
|
|
|
|
detail := item.(map[string]interface{})
|
|
|
|
src := configservice.SourceDetail{}
|
|
|
|
|
|
|
|
if msgType, ok := detail["message_type"].(string); ok && msgType != "" {
|
|
|
|
src.MessageType = aws.String(msgType)
|
|
|
|
}
|
|
|
|
if eventSource, ok := detail["event_source"].(string); ok && eventSource != "" {
|
|
|
|
src.EventSource = aws.String(eventSource)
|
|
|
|
}
|
|
|
|
if maxExecFreq, ok := detail["maximum_execution_frequency"].(string); ok && maxExecFreq != "" {
|
|
|
|
src.MaximumExecutionFrequency = aws.String(maxExecFreq)
|
|
|
|
}
|
|
|
|
|
|
|
|
results = append(results, &src)
|
|
|
|
}
|
|
|
|
|
|
|
|
return results
|
|
|
|
}
|
|
|
|
|
|
|
|
func flattenConfigRuleScope(scope *configservice.Scope) []interface{} {
|
|
|
|
var items []interface{}
|
|
|
|
|
|
|
|
m := make(map[string]interface{})
|
|
|
|
if scope.ComplianceResourceId != nil {
|
|
|
|
m["compliance_resource_id"] = *scope.ComplianceResourceId
|
|
|
|
}
|
|
|
|
if scope.ComplianceResourceTypes != nil {
|
|
|
|
m["compliance_resource_types"] = schema.NewSet(schema.HashString, flattenStringList(scope.ComplianceResourceTypes))
|
|
|
|
}
|
|
|
|
if scope.TagKey != nil {
|
|
|
|
m["tag_key"] = *scope.TagKey
|
|
|
|
}
|
|
|
|
if scope.TagValue != nil {
|
|
|
|
m["tag_value"] = *scope.TagValue
|
|
|
|
}
|
|
|
|
|
|
|
|
items = append(items, m)
|
|
|
|
return items
|
|
|
|
}
|
|
|
|
|
|
|
|
func expandConfigRuleScope(configured map[string]interface{}) *configservice.Scope {
|
|
|
|
scope := &configservice.Scope{}
|
|
|
|
|
|
|
|
if v, ok := configured["compliance_resource_id"].(string); ok && v != "" {
|
|
|
|
scope.ComplianceResourceId = aws.String(v)
|
|
|
|
}
|
|
|
|
if v, ok := configured["compliance_resource_types"]; ok {
|
|
|
|
l := v.(*schema.Set)
|
|
|
|
if l.Len() > 0 {
|
|
|
|
scope.ComplianceResourceTypes = expandStringList(l.List())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if v, ok := configured["tag_key"].(string); ok && v != "" {
|
|
|
|
scope.TagKey = aws.String(v)
|
|
|
|
}
|
|
|
|
if v, ok := configured["tag_value"].(string); ok && v != "" {
|
|
|
|
scope.TagValue = aws.String(v)
|
|
|
|
}
|
|
|
|
|
|
|
|
return scope
|
|
|
|
}
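// A minimal sketch (illustrative only) of expanding a config rule scope block
// into the SDK type; only populated fields end up on the Scope:
//
//	scope := expandConfigRuleScope(map[string]interface{}{
//		"compliance_resource_types": schema.NewSet(schema.HashString,
//			[]interface{}{"AWS::EC2::Instance"}),
//		"tag_key": "Environment",
//	})
//	// scope.ComplianceResourceTypes has one element, scope.TagKey is set,
//	// ComplianceResourceId and TagValue stay nil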
|
|
|
|
|
2016-09-17 20:50:38 +02:00
|
|
|
// Takes a value containing a JSON string and passes it through
|
|
|
|
// the JSON parser to normalize it, returning either a parsing
|
|
|
|
// error or the normalized JSON string.
|
|
|
|
func normalizeJsonString(jsonString interface{}) (string, error) {
|
|
|
|
var j interface{}
|
|
|
|
|
|
|
|
if jsonString == nil || jsonString.(string) == "" {
|
|
|
|
return "", nil
|
|
|
|
}
|
|
|
|
|
|
|
|
s := jsonString.(string)
|
|
|
|
|
|
|
|
err := json.Unmarshal([]byte(s), &j)
|
|
|
|
if err != nil {
|
|
|
|
return s, err
|
|
|
|
}
|
|
|
|
|
|
|
|
b, _ := json.Marshal(j)
|
|
|
|
return string(b), nil
|
|
|
|
}
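// A minimal sketch (illustrative only): two semantically identical documents
// normalize to the same compact string, so whitespace-only edits do not show
// up as diffs:
//
//	a, _ := normalizeJsonString(`{ "Version": "2012-10-17" }`)
//	b, _ := normalizeJsonString(`{"Version":"2012-10-17"}`)
//	// a == b == `{"Version":"2012-10-17"}`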
|
2017-01-17 18:30:46 +01:00
|
|
|
|
2017-01-20 15:00:32 +01:00
|
|
|
// Takes a value containing a YAML string and passes it through
|
|
|
|
// the YAML parser. Returns either a parsing
|
|
|
|
// error or the original YAML string.
|
|
|
|
func checkYamlString(yamlString interface{}) (string, error) {
|
|
|
|
var y interface{}
|
|
|
|
|
|
|
|
if yamlString == nil || yamlString.(string) == "" {
|
|
|
|
return "", nil
|
|
|
|
}
|
|
|
|
|
|
|
|
s := yamlString.(string)
|
|
|
|
|
|
|
|
err := yaml.Unmarshal([]byte(s), &y)
|
|
|
|
if err != nil {
|
|
|
|
return s, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return s, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func normalizeCloudFormationTemplate(templateString interface{}) (string, error) {
|
|
|
|
if looksLikeJsonString(templateString) {
|
|
|
|
return normalizeJsonString(templateString)
|
|
|
|
}
|
|
|
|
return checkYamlString(templateString)
|
|
|
|
|
|
|
|
}
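// A minimal sketch (illustrative only): JSON-bodied templates are re-marshaled
// through normalizeJsonString, while YAML templates are only parsed for
// validity and returned as-is:
//
//	j, _ := normalizeCloudFormationTemplate(`{"Resources": {}}`) // compact, normalized JSON
//	y, _ := normalizeCloudFormationTemplate("Resources: {}\n")   // original YAML string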
|
|
|
|
|
2017-01-17 18:30:46 +01:00
|
|
|
func flattenInspectorTags(cfTags []*cloudformation.Tag) map[string]string {
|
|
|
|
tags := make(map[string]string, len(cfTags))
|
|
|
|
for _, t := range cfTags {
|
|
|
|
tags[*t.Key] = *t.Value
|
|
|
|
}
|
|
|
|
return tags
|
|
|
|
}
|
2017-03-18 15:18:19 +01:00
|
|
|
|
|
|
|
func flattenApiGatewayUsageApiStages(s []*apigateway.ApiStage) []map[string]interface{} {
|
|
|
|
stages := make([]map[string]interface{}, 0)
|
|
|
|
|
|
|
|
for _, bd := range s {
|
|
|
|
if bd.ApiId != nil && bd.Stage != nil {
|
|
|
|
stage := make(map[string]interface{})
|
|
|
|
stage["api_id"] = *bd.ApiId
|
|
|
|
stage["stage"] = *bd.Stage
|
|
|
|
|
|
|
|
stages = append(stages, stage)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(stages) > 0 {
|
|
|
|
return stages
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func flattenApiGatewayUsagePlanThrottling(s *apigateway.ThrottleSettings) []map[string]interface{} {
|
|
|
|
settings := make(map[string]interface{}, 0)
|
|
|
|
|
|
|
|
if s == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
if s.BurstLimit != nil {
|
|
|
|
settings["burst_limit"] = *s.BurstLimit
|
|
|
|
}
|
|
|
|
|
|
|
|
if s.RateLimit != nil {
|
|
|
|
settings["rate_limit"] = *s.RateLimit
|
|
|
|
}
|
|
|
|
|
|
|
|
return []map[string]interface{}{settings}
|
|
|
|
}
|
|
|
|
|
|
|
|
func flattenApiGatewayUsagePlanQuota(s *apigateway.QuotaSettings) []map[string]interface{} {
|
|
|
|
settings := make(map[string]interface{}, 0)
|
|
|
|
|
|
|
|
if s == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
if s.Limit != nil {
|
|
|
|
settings["limit"] = *s.Limit
|
|
|
|
}
|
|
|
|
|
|
|
|
if s.Offset != nil {
|
|
|
|
settings["offset"] = *s.Offset
|
|
|
|
}
|
|
|
|
|
|
|
|
if s.Period != nil {
|
|
|
|
settings["period"] = *s.Period
|
|
|
|
}
|
|
|
|
|
|
|
|
return []map[string]interface{}{settings}
|
|
|
|
}
|
2017-04-21 11:53:48 +02:00
|
|
|
|
2017-04-24 20:43:56 +02:00
|
|
|
func buildApiGatewayInvokeURL(restApiId, region, stageName string) string {
|
|
|
|
return fmt.Sprintf("https://%s.execute-api.%s.amazonaws.com/%s",
|
|
|
|
restApiId, region, stageName)
|
|
|
|
}
|
|
|
|
|
|
|
|
func buildApiGatewayExecutionARN(restApiId, region, accountId string) (string, error) {
|
|
|
|
if accountId == "" {
|
|
|
|
return "", fmt.Errorf("Unable to build execution ARN for %s as account ID is missing",
|
|
|
|
restApiId)
|
|
|
|
}
|
|
|
|
return fmt.Sprintf("arn:aws:execute-api:%s:%s:%s",
|
|
|
|
region, accountId, restApiId), nil
|
|
|
|
}
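// A minimal sketch (illustrative only, the identifiers are made up) of the
// strings the two helpers above produce:
//
//	buildApiGatewayInvokeURL("abc123", "us-east-1", "prod")
//	// "https://abc123.execute-api.us-east-1.amazonaws.com/prod"
//
//	arn, _ := buildApiGatewayExecutionARN("abc123", "us-east-1", "123456789012")
//	// "arn:aws:execute-api:us-east-1:123456789012:abc123"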
|
|
|
|
|
2017-04-21 11:53:48 +02:00
|
|
|
func expandCognitoSupportedLoginProviders(config map[string]interface{}) map[string]*string {
|
|
|
|
m := map[string]*string{}
|
|
|
|
for k, v := range config {
|
|
|
|
s := v.(string)
|
|
|
|
m[k] = &s
|
|
|
|
}
|
|
|
|
return m
|
|
|
|
}
|
|
|
|
|
|
|
|
func flattenCognitoSupportedLoginProviders(config map[string]*string) map[string]string {
|
|
|
|
m := map[string]string{}
|
|
|
|
for k, v := range config {
|
|
|
|
m[k] = *v
|
|
|
|
}
|
|
|
|
return m
|
|
|
|
}
|
|
|
|
|
|
|
|
func expandCognitoIdentityProviders(s *schema.Set) []*cognitoidentity.Provider {
|
|
|
|
ips := make([]*cognitoidentity.Provider, 0)
|
|
|
|
|
|
|
|
for _, v := range s.List() {
|
|
|
|
s := v.(map[string]interface{})
|
|
|
|
|
|
|
|
ip := &cognitoidentity.Provider{}
|
|
|
|
|
|
|
|
if sv, ok := s["client_id"].(string); ok {
|
|
|
|
ip.ClientId = aws.String(sv)
|
|
|
|
}
|
|
|
|
|
|
|
|
if sv, ok := s["provider_name"].(string); ok {
|
|
|
|
ip.ProviderName = aws.String(sv)
|
|
|
|
}
|
|
|
|
|
|
|
|
if sv, ok := s["server_side_token_check"].(bool); ok {
|
|
|
|
ip.ServerSideTokenCheck = aws.Bool(sv)
|
|
|
|
}
|
|
|
|
|
|
|
|
ips = append(ips, ip)
|
|
|
|
}
|
|
|
|
|
|
|
|
return ips
|
|
|
|
}
|
|
|
|
|
|
|
|
func flattenCognitoIdentityProviders(ips []*cognitoidentity.Provider) []map[string]interface{} {
|
|
|
|
values := make([]map[string]interface{}, 0)
|
|
|
|
|
|
|
|
for _, v := range ips {
|
|
|
|
ip := make(map[string]interface{})
|
|
|
|
|
|
|
|
if v == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
if v.ClientId != nil {
|
|
|
|
ip["client_id"] = *v.ClientId
|
|
|
|
}
|
|
|
|
|
|
|
|
if v.ProviderName != nil {
|
|
|
|
ip["provider_name"] = *v.ProviderName
|
|
|
|
}
|
|
|
|
|
|
|
|
if v.ServerSideTokenCheck != nil {
|
|
|
|
ip["server_side_token_check"] = *v.ServerSideTokenCheck
|
|
|
|
}
|
|
|
|
|
|
|
|
values = append(values, ip)
|
|
|
|
}
|
|
|
|
|
|
|
|
return values
|
|
|
|
}
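// A minimal sketch (illustrative only, the values are made up) of the map
// shape handled by the two Cognito identity provider helpers above:
//
//	map[string]interface{}{
//		"client_id":               "7lhlkkfbfb4q5kpp90urffao",
//		"provider_name":           "cognito-idp.us-east-1.amazonaws.com/us-east-1_Ab129faBb",
//		"server_side_token_check": false,
//	}
//
// expandCognitoIdentityProviders turns each such element of the set into a
// cognitoidentity.Provider, and flattenCognitoIdentityProviders converts the
// SDK structs back into the same map shape for state.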
|