diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go index 382f0506e..987c0dd26 100644 --- a/builtin/providers/aws/config.go +++ b/builtin/providers/aws/config.go @@ -36,6 +36,7 @@ import ( "github.com/aws/aws-sdk-go/service/elasticache" "github.com/aws/aws-sdk-go/service/elasticbeanstalk" elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice" + "github.com/aws/aws-sdk-go/service/elastictranscoder" "github.com/aws/aws-sdk-go/service/elb" "github.com/aws/aws-sdk-go/service/emr" "github.com/aws/aws-sdk-go/service/firehose" @@ -75,43 +76,44 @@ type Config struct { } type AWSClient struct { - cfconn *cloudformation.CloudFormation - cloudfrontconn *cloudfront.CloudFront - cloudtrailconn *cloudtrail.CloudTrail - cloudwatchconn *cloudwatch.CloudWatch - cloudwatchlogsconn *cloudwatchlogs.CloudWatchLogs - cloudwatcheventsconn *cloudwatchevents.CloudWatchEvents - dsconn *directoryservice.DirectoryService - dynamodbconn *dynamodb.DynamoDB - ec2conn *ec2.EC2 - ecrconn *ecr.ECR - ecsconn *ecs.ECS - efsconn *efs.EFS - elbconn *elb.ELB - emrconn *emr.EMR - esconn *elasticsearch.ElasticsearchService - apigateway *apigateway.APIGateway - autoscalingconn *autoscaling.AutoScaling - s3conn *s3.S3 - sqsconn *sqs.SQS - snsconn *sns.SNS - stsconn *sts.STS - redshiftconn *redshift.Redshift - r53conn *route53.Route53 - accountid string - region string - rdsconn *rds.RDS - iamconn *iam.IAM - kinesisconn *kinesis.Kinesis - kmsconn *kms.KMS - firehoseconn *firehose.Firehose - elasticacheconn *elasticache.ElastiCache - elasticbeanstalkconn *elasticbeanstalk.ElasticBeanstalk - lambdaconn *lambda.Lambda - opsworksconn *opsworks.OpsWorks - glacierconn *glacier.Glacier - codedeployconn *codedeploy.CodeDeploy - codecommitconn *codecommit.CodeCommit + cfconn *cloudformation.CloudFormation + cloudfrontconn *cloudfront.CloudFront + cloudtrailconn *cloudtrail.CloudTrail + cloudwatchconn *cloudwatch.CloudWatch + cloudwatchlogsconn *cloudwatchlogs.CloudWatchLogs 
+ cloudwatcheventsconn *cloudwatchevents.CloudWatchEvents + dsconn *directoryservice.DirectoryService + dynamodbconn *dynamodb.DynamoDB + ec2conn *ec2.EC2 + ecrconn *ecr.ECR + ecsconn *ecs.ECS + efsconn *efs.EFS + elbconn *elb.ELB + emrconn *emr.EMR + esconn *elasticsearch.ElasticsearchService + apigateway *apigateway.APIGateway + autoscalingconn *autoscaling.AutoScaling + s3conn *s3.S3 + sqsconn *sqs.SQS + snsconn *sns.SNS + stsconn *sts.STS + redshiftconn *redshift.Redshift + r53conn *route53.Route53 + accountid string + region string + rdsconn *rds.RDS + iamconn *iam.IAM + kinesisconn *kinesis.Kinesis + kmsconn *kms.KMS + firehoseconn *firehose.Firehose + elasticacheconn *elasticache.ElastiCache + elasticbeanstalkconn *elasticbeanstalk.ElasticBeanstalk + elastictranscoderconn *elastictranscoder.ElasticTranscoder + lambdaconn *lambda.Lambda + opsworksconn *opsworks.OpsWorks + glacierconn *glacier.Glacier + codedeployconn *codedeploy.CodeDeploy + codecommitconn *codecommit.CodeCommit } // Client configures and returns a fully initialized AWSClient @@ -228,6 +230,9 @@ func (c *Config) Client() (interface{}, error) { log.Println("[INFO] Initializing Elastic Beanstalk Connection") client.elasticbeanstalkconn = elasticbeanstalk.New(sess) + log.Println("[INFO] Initializing Elastic Transcoder Connection") + client.elastictranscoderconn = elastictranscoder.New(sess) + authErr := c.ValidateAccountId(client.accountid) if authErr != nil { errs = append(errs, authErr) diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index 49f9b6494..274a3860c 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -178,6 +178,8 @@ func Provider() terraform.ResourceProvider { "aws_elastic_beanstalk_configuration_template": resourceAwsElasticBeanstalkConfigurationTemplate(), "aws_elastic_beanstalk_environment": resourceAwsElasticBeanstalkEnvironment(), "aws_elasticsearch_domain": resourceAwsElasticSearchDomain(), + 
"aws_elastictranscoder_pipeline": resourceAwsElasticTranscoderPipeline(), + "aws_elastictranscoder_preset": resourceAwsElasticTranscoderPreset(), "aws_elb": resourceAwsElb(), "aws_flow_log": resourceAwsFlowLog(), "aws_glacier_vault": resourceAwsGlacierVault(), diff --git a/builtin/providers/aws/resource_aws_elastic_transcoder_pipeline.go b/builtin/providers/aws/resource_aws_elastic_transcoder_pipeline.go new file mode 100644 index 000000000..6da9a1866 --- /dev/null +++ b/builtin/providers/aws/resource_aws_elastic_transcoder_pipeline.go @@ -0,0 +1,475 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/elastictranscoder" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsElasticTranscoderPipeline() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsElasticTranscoderPipelineCreate, + Read: resourceAwsElasticTranscoderPipelineRead, + Update: resourceAwsElasticTranscoderPipelineUpdate, + Delete: resourceAwsElasticTranscoderPipelineDelete, + + Schema: map[string]*schema.Schema{ + "arn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "aws_kms_key_arn": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + // ContentConfig also requires ThumbnailConfig + "content_config": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + // elastictranscoder.PipelineOutputConfig + Schema: map[string]*schema.Schema{ + "bucket": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + // AWS may insert the bucket name here taken from output_bucket + Computed: true, + }, + "storage_class": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + + "content_config_permissions": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: 
&schema.Resource{ + Schema: map[string]*schema.Schema{ + "access": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "grantee": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "grantee_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + + "input_bucket": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(`^[.0-9A-Za-z-_]+$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "only alphanumeric characters, hyphens, underscores, and periods allowed in %q", k)) + } + if len(value) > 40 { + errors = append(errors, fmt.Errorf("%q cannot be longer than 40 characters", k)) + } + return + }, + }, + + "notifications": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "completed": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "error": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "progressing": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "warning": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + + // The output_bucket must be set, or both of content_config.bucket + // and thumbnail_config.bucket. + // This is set as Computed, because the API may or may not return + // this as set based on the other 2 configurations. 
+ "output_bucket": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "role": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "thumbnail_config": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + // elastictranscoder.PipelineOutputConfig + Schema: map[string]*schema.Schema{ + "bucket": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + // AWS may insert the bucket name here taken from output_bucket + Computed: true, + }, + "storage_class": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + + "thumbnail_config_permissions": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "access": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "grantee": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "grantee_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + }, + } +} + +func resourceAwsElasticTranscoderPipelineCreate(d *schema.ResourceData, meta interface{}) error { + elastictranscoderconn := meta.(*AWSClient).elastictranscoderconn + + req := &elastictranscoder.CreatePipelineInput{ + AwsKmsKeyArn: getStringPtr(d, "aws_kms_key_arn"), + ContentConfig: expandETPiplineOutputConfig(d, "content_config"), + InputBucket: aws.String(d.Get("input_bucket").(string)), + Notifications: expandETNotifications(d), + OutputBucket: getStringPtr(d, "output_bucket"), + Role: getStringPtr(d, "role"), + ThumbnailConfig: expandETPiplineOutputConfig(d, "thumbnail_config"), + } + + if name, ok := d.GetOk("name"); ok { + req.Name = aws.String(name.(string)) + } else { + name := resource.PrefixedUniqueId("tf-et-") + d.Set("name", name) + req.Name = aws.String(name) + } + + if (req.OutputBucket == nil && (req.ContentConfig == nil || 
req.ContentConfig.Bucket == nil)) || + (req.OutputBucket != nil && req.ContentConfig != nil && req.ContentConfig.Bucket != nil) { + return fmt.Errorf("[ERROR] you must specify only one of output_bucket or content_config.bucket") + } + + log.Printf("[DEBUG] Elastic Transcoder Pipeline create opts: %s", req) + resp, err := elastictranscoderconn.CreatePipeline(req) + if err != nil { + return fmt.Errorf("Error creating Elastic Transcoder Pipeline: %s", err) + } + + d.SetId(*resp.Pipeline.Id) + + for _, w := range resp.Warnings { + log.Printf("[WARN] Elastic Transcoder Pipeline %s: %s", w.Code, w.Message) + } + + return resourceAwsElasticTranscoderPipelineRead(d, meta) +} + +func expandETNotifications(d *schema.ResourceData) *elastictranscoder.Notifications { + set, ok := d.GetOk("notifications") + if !ok { + return nil + } + + s := set.(*schema.Set) + if s == nil || s.Len() == 0 { + return nil + } + + m := s.List()[0].(map[string]interface{}) + + return &elastictranscoder.Notifications{ + Completed: getStringPtr(m, "completed"), + Error: getStringPtr(m, "error"), + Progressing: getStringPtr(m, "progressing"), + Warning: getStringPtr(m, "warning"), + } +} + +func flattenETNotifications(n *elastictranscoder.Notifications) []map[string]interface{} { + if n == nil { + return nil + } + + allEmpty := func(s ...*string) bool { + for _, s := range s { + if s != nil && *s != "" { + return false + } + } + return true + } + + // the API always returns a Notifications value, even when all fields are nil + if allEmpty(n.Completed, n.Error, n.Progressing, n.Warning) { + return nil + } + + m := setMap(make(map[string]interface{})) + + m.SetString("completed", n.Completed) + m.SetString("error", n.Error) + m.SetString("progressing", n.Progressing) + m.SetString("warning", n.Warning) + return m.MapList() +} + +func expandETPiplineOutputConfig(d *schema.ResourceData, key string) *elastictranscoder.PipelineOutputConfig { + set, ok := d.GetOk(key) + if !ok { + return nil + } + + s := 
set.(*schema.Set) + if s == nil || s.Len() == 0 { + return nil + } + + cc := s.List()[0].(map[string]interface{}) + + cfg := &elastictranscoder.PipelineOutputConfig{ + Bucket: getStringPtr(cc, "bucket"), + StorageClass: getStringPtr(cc, "storage_class"), + } + + switch key { + case "content_config": + cfg.Permissions = expandETPermList(d.Get("content_config_permissions").(*schema.Set)) + case "thumbnail_config": + cfg.Permissions = expandETPermList(d.Get("thumbnail_config_permissions").(*schema.Set)) + } + + return cfg +} + +func flattenETPipelineOutputConfig(cfg *elastictranscoder.PipelineOutputConfig) []map[string]interface{} { + m := setMap(make(map[string]interface{})) + + m.SetString("bucket", cfg.Bucket) + m.SetString("storage_class", cfg.StorageClass) + + return m.MapList() +} + +func expandETPermList(permissions *schema.Set) []*elastictranscoder.Permission { + var perms []*elastictranscoder.Permission + + for _, p := range permissions.List() { + perm := &elastictranscoder.Permission{ + Access: getStringPtrList(p.(map[string]interface{}), "access"), + Grantee: getStringPtr(p, "grantee"), + GranteeType: getStringPtr(p, "grantee_type"), + } + perms = append(perms, perm) + } + return perms +} + +func flattenETPermList(perms []*elastictranscoder.Permission) []map[string]interface{} { + var set []map[string]interface{} + + for _, p := range perms { + m := setMap(make(map[string]interface{})) + m.Set("access", flattenStringList(p.Access)) + m.SetString("grantee", p.Grantee) + m.SetString("grantee_type", p.GranteeType) + + set = append(set, m) + } + return set +} + +func resourceAwsElasticTranscoderPipelineUpdate(d *schema.ResourceData, meta interface{}) error { + elastictranscoderconn := meta.(*AWSClient).elastictranscoderconn + + req := &elastictranscoder.UpdatePipelineInput{ + Id: aws.String(d.Id()), + } + + if d.HasChange("aws_kms_key_arn") { + req.AwsKmsKeyArn = getStringPtr(d, "aws_kms_key_arn") + } + + if d.HasChange("content_config") { + req.ContentConfig = 
expandETPiplineOutputConfig(d, "content_config") + } + + if d.HasChange("input_bucket") { + req.InputBucket = getStringPtr(d, "input_bucket") + } + + if d.HasChange("name") { + req.Name = getStringPtr(d, "name") + } + + if d.HasChange("notifications") { + req.Notifications = expandETNotifications(d) + } + + if d.HasChange("role") { + req.Role = getStringPtr(d, "role") + } + + if d.HasChange("thumbnail_config") { + req.ThumbnailConfig = expandETPiplineOutputConfig(d, "thumbnail_config") + } + + log.Printf("[DEBUG] Updating Elastic Transcoder Pipeline: %#v", req) + output, err := elastictranscoderconn.UpdatePipeline(req) + if err != nil { + return fmt.Errorf("Error updating Elastic Transcoder pipeline: %s", err) + } + + for _, w := range output.Warnings { + log.Printf("[WARN] Elastic Transcoder Pipeline %s: %s", w.Code, w.Message) + } + + return resourceAwsElasticTranscoderPipelineRead(d, meta) +} + +func resourceAwsElasticTranscoderPipelineRead(d *schema.ResourceData, meta interface{}) error { + elastictranscoderconn := meta.(*AWSClient).elastictranscoderconn + + resp, err := elastictranscoderconn.ReadPipeline(&elastictranscoder.ReadPipelineInput{ + Id: aws.String(d.Id()), + }) + + if err != nil { + if err, ok := err.(awserr.Error); ok && err.Code() == "ResourceNotFoundException" { + d.SetId("") + return nil + } + return err + } + + log.Printf("[DEBUG] Elastic Transcoder Pipeline Read response: %#v", resp) + + pipeline := resp.Pipeline + + d.Set("arn", *pipeline.Arn) + + if arn := pipeline.AwsKmsKeyArn; arn != nil { + d.Set("aws_kms_key_arn", *arn) + } + + if pipeline.ContentConfig != nil { + err := d.Set("content_config", flattenETPipelineOutputConfig(pipeline.ContentConfig)) + if err != nil { + return fmt.Errorf("error setting content_config: %s", err) + } + + if pipeline.ContentConfig.Permissions != nil { + err := d.Set("content_config_permissions", flattenETPermList(pipeline.ContentConfig.Permissions)) + if err != nil { + return fmt.Errorf("error setting 
content_config_permissions: %s", err) + } + } + } + + d.Set("input_bucket", *pipeline.InputBucket) + d.Set("name", *pipeline.Name) + + notifications := flattenETNotifications(pipeline.Notifications) + if notifications != nil { + if err := d.Set("notifications", notifications); err != nil { + return fmt.Errorf("error setting notifications: %s", err) + } + } + + d.Set("role", *pipeline.Role) + + if pipeline.ThumbnailConfig != nil { + err := d.Set("thumbnail_config", flattenETPipelineOutputConfig(pipeline.ThumbnailConfig)) + if err != nil { + return fmt.Errorf("error setting thumbnail_config: %s", err) + } + + if pipeline.ThumbnailConfig.Permissions != nil { + err := d.Set("thumbnail_config_permissions", flattenETPermList(pipeline.ThumbnailConfig.Permissions)) + if err != nil { + return fmt.Errorf("error setting thumbnail_config_permissions: %s", err) + } + } + } + + if pipeline.OutputBucket != nil { + d.Set("output_bucket", *pipeline.OutputBucket) + } + + return nil +} + +func resourceAwsElasticTranscoderPipelineDelete(d *schema.ResourceData, meta interface{}) error { + elastictranscoderconn := meta.(*AWSClient).elastictranscoderconn + + log.Printf("[DEBUG] Elastic Transcoder Delete Pipeline: %s", d.Id()) + _, err := elastictranscoderconn.DeletePipeline(&elastictranscoder.DeletePipelineInput{ + Id: aws.String(d.Id()), + }) + if err != nil { + return fmt.Errorf("error deleting Elastic Transcoder Pipeline: %s", err) + } + return nil +} diff --git a/builtin/providers/aws/resource_aws_elastic_transcoder_pipeline_test.go b/builtin/providers/aws/resource_aws_elastic_transcoder_pipeline_test.go new file mode 100644 index 000000000..b7c276476 --- /dev/null +++ b/builtin/providers/aws/resource_aws_elastic_transcoder_pipeline_test.go @@ -0,0 +1,328 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/elastictranscoder" + "github.com/hashicorp/terraform/helper/resource" 
+ "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSElasticTranscoderPipeline_basic(t *testing.T) { + pipeline := &elastictranscoder.Pipeline{} + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: "aws_elastictranscoder_pipeline.bar", + Providers: testAccProviders, + CheckDestroy: testAccCheckElasticTranscoderPipelineDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: awsElasticTranscoderPipelineConfigBasic, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSElasticTranscoderPipelineExists("aws_elastictranscoder_pipeline.bar", pipeline), + ), + }, + }, + }) +} + +func TestAccAWSElasticTranscoderPipeline_withContentConfig(t *testing.T) { + pipeline := &elastictranscoder.Pipeline{} + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: "aws_elastictranscoder_pipeline.bar", + Providers: testAccProviders, + CheckDestroy: testAccCheckElasticTranscoderPipelineDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: awsElasticTranscoderPipelineWithContentConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSElasticTranscoderPipelineExists("aws_elastictranscoder_pipeline.bar", pipeline), + ), + }, + resource.TestStep{ + Config: awsElasticTranscoderPipelineWithContentConfigUpdate, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSElasticTranscoderPipelineExists("aws_elastictranscoder_pipeline.bar", pipeline), + ), + }, + }, + }) +} + +func TestAccAWSElasticTranscoderPipeline_withPermissions(t *testing.T) { + pipeline := &elastictranscoder.Pipeline{} + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: "aws_elastictranscoder_pipeline.baz", + Providers: testAccProviders, + CheckDestroy: testAccCheckElasticTranscoderPipelineDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: awsElasticTranscoderPipelineWithPerms, + Check: resource.ComposeTestCheckFunc( + 
testAccCheckAWSElasticTranscoderPipelineExists("aws_elastictranscoder_pipeline.baz", pipeline), + ), + }, + }, + }) +} + +func testAccCheckAWSElasticTranscoderPipelineExists(n string, res *elastictranscoder.Pipeline) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No Pipeline ID is set") + } + + conn := testAccProvider.Meta().(*AWSClient).elastictranscoderconn + + out, err := conn.ReadPipeline(&elastictranscoder.ReadPipelineInput{ + Id: aws.String(rs.Primary.ID), + }) + + if err != nil { + return err + } + + *res = *out.Pipeline + + return nil + } +} + +func testAccCheckElasticTranscoderPipelineDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).elastictranscoderconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_elastictranscoder_pipeline" { + continue + } + + out, err := conn.ReadPipeline(&elastictranscoder.ReadPipelineInput{ + Id: aws.String(rs.Primary.ID), + }) + + if err == nil { + if out.Pipeline != nil && *out.Pipeline.Id == rs.Primary.ID { + return fmt.Errorf("Elastic Transcoder Pipeline still exists") + } + } + + awsErr, ok := err.(awserr.Error) + if !ok { + return err + } + + if awsErr.Code() != "ResourceNotFoundException" { + return fmt.Errorf("unexpected error: %s", awsErr) + } + + } + return nil +} + +const awsElasticTranscoderPipelineConfigBasic = ` +resource "aws_elastictranscoder_pipeline" "bar" { + input_bucket = "${aws_s3_bucket.test_bucket.bucket}" + output_bucket = "${aws_s3_bucket.test_bucket.bucket}" + name = "aws_elastictranscoder_pipeline_tf_test_" + role = "${aws_iam_role.test_role.arn}" +} + +resource "aws_iam_role" "test_role" { + name = "aws_elastictranscoder_pipeline_tf_test_role_" + + assume_role_policy = < 0 { + s[key] = m + } +} + +// Set assigns value to s[key] if value isn't nil +func (s setMap) Set(key string, value 
interface{}) { + if reflect.ValueOf(value).IsNil() { + return + } + + s[key] = value +} + +// Map returns the raw map type for a shorter type conversion +func (s setMap) Map() map[string]interface{} { + return map[string]interface{}(s) +} + +// MapList returns the map[string]interface{} as a single element in a slice to +// match the schema.Set data type used for structs. +func (s setMap) MapList() []map[string]interface{} { + return []map[string]interface{}{s.Map()} +}