diff --git a/builtin/providers/aws/resource_aws_emr_cluster.go b/builtin/providers/aws/resource_aws_emr_cluster.go
index adb9b4c15..6464f12ed 100644
--- a/builtin/providers/aws/resource_aws_emr_cluster.go
+++ b/builtin/providers/aws/resource_aws_emr_cluster.go
@@ -380,7 +380,10 @@ func resourceAwsEMRClusterRead(d *schema.ResourceData, meta interface{}) error {
 func resourceAwsEMRClusterUpdate(d *schema.ResourceData, meta interface{}) error {
 	conn := meta.(*AWSClient).emrconn
 
+	d.Partial(true)
+
 	if d.HasChange("core_instance_count") {
+		d.SetPartial("core_instance_count")
 		log.Printf("[DEBUG] Modify EMR cluster")
 		groups, err := fetchAllEMRInstanceGroups(meta, d.Id())
 		if err != nil {
@@ -409,24 +412,31 @@ func resourceAwsEMRClusterUpdate(d *schema.ResourceData, meta interface{}) error
 		}
 
 		log.Printf("[DEBUG] Modify EMR Cluster done...")
+
+		log.Println("[INFO] Waiting for EMR Cluster to be available")
+
+		stateConf := &resource.StateChangeConf{
+			Pending:    []string{"STARTING", "BOOTSTRAPPING"},
+			Target:     []string{"WAITING", "RUNNING"},
+			Refresh:    resourceAwsEMRClusterStateRefreshFunc(d, meta),
+			Timeout:    40 * time.Minute,
+			MinTimeout: 10 * time.Second,
+			Delay:      5 * time.Second,
+		}
+
+		_, err = stateConf.WaitForState()
+		if err != nil {
+			return fmt.Errorf("[WARN] Error waiting for EMR Cluster state to be \"WAITING\" or \"RUNNING\" after modification: %s", err)
+		}
 	}
 
-	log.Println(
-		"[INFO] Waiting for EMR Cluster to be available")
-
-	stateConf := &resource.StateChangeConf{
-		Pending:    []string{"STARTING", "BOOTSTRAPPING"},
-		Target:     []string{"WAITING", "RUNNING"},
-		Refresh:    resourceAwsEMRClusterStateRefreshFunc(d, meta),
-		Timeout:    40 * time.Minute,
-		MinTimeout: 10 * time.Second,
-		Delay:      5 * time.Second,
+	if err := setTagsEMR(conn, d); err != nil {
+		return err
+	} else {
+		d.SetPartial("tags")
 	}
 
-	_, err := stateConf.WaitForState()
-	if err != nil {
-		return fmt.Errorf("[WARN] Error waiting for EMR Cluster state to be \"WAITING\" or \"RUNNING\" after modification: %s", err)
-	}
+	d.Partial(false)
 
 	return resourceAwsEMRClusterRead(d, meta)
 }
@@ -593,6 +603,64 @@ func tagsToMapEMR(ts []*emr.Tag) map[string]string {
 	return result
 }
 
+func diffTagsEMR(oldTags, newTags []*emr.Tag) ([]*emr.Tag, []*emr.Tag) {
+	// First, we're creating everything we have
+	create := make(map[string]interface{})
+	for _, t := range newTags {
+		create[*t.Key] = *t.Value
+	}
+
+	// Build the list of what to remove
+	var remove []*emr.Tag
+	for _, t := range oldTags {
+		old, ok := create[*t.Key]
+		if !ok || old != *t.Value {
+			// Delete it!
+			remove = append(remove, t)
+		}
+	}
+
+	return expandTags(create), remove
+}
+
+func setTagsEMR(conn *emr.EMR, d *schema.ResourceData) error {
+	if d.HasChange("tags") {
+		oraw, nraw := d.GetChange("tags")
+		o := oraw.(map[string]interface{})
+		n := nraw.(map[string]interface{})
+		create, remove := diffTagsEMR(expandTags(o), expandTags(n))
+
+		// Set tags
+		if len(remove) > 0 {
+			log.Printf("[DEBUG] Removing tags: %s", remove)
+			k := make([]*string, len(remove), len(remove))
+			for i, t := range remove {
+				k[i] = t.Key
+			}
+
+			_, err := conn.RemoveTags(&emr.RemoveTagsInput{
+				ResourceId: aws.String(d.Id()),
+				TagKeys:    k,
+			})
+			if err != nil {
+				return err
+			}
+		}
+		if len(create) > 0 {
+			log.Printf("[DEBUG] Creating tags: %s", create)
+			_, err := conn.AddTags(&emr.AddTagsInput{
+				ResourceId: aws.String(d.Id()),
+				Tags:       create,
+			})
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
 func expandBootstrapActions(bootstrapActions []interface{}) []*emr.BootstrapActionConfig {
 	actionsOut := []*emr.BootstrapActionConfig{}
 
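Reviewer note (not part of the patch): a minimal, self-contained sketch of the semantics diffTagsEMR implements, rewritten over plain string maps so it runs without the AWS SDK. The names diffTags, oldTags, and newTags are illustrative only. "create" ends up holding every tag in the new set (AddTags overwrites existing keys in place), while "remove" collects only those old tags whose key disappeared or whose value changed.

package main

import "fmt"

// diffTags mirrors the logic of diffTagsEMR over plain maps: every
// new tag is (re)added, and an old tag is removed only when its key
// is gone or its value no longer matches.
func diffTags(oldTags, newTags map[string]string) (map[string]string, []string) {
	create := make(map[string]string, len(newTags))
	for k, v := range newTags {
		create[k] = v
	}

	var remove []string
	for k, v := range oldTags {
		if nv, ok := newTags[k]; !ok || nv != v {
			remove = append(remove, k)
		}
	}
	return create, remove
}

func main() {
	oldTags := map[string]string{"env": "env", "role": "rolename"}
	newTags := map[string]string{"env": "production", "name": "name-env"}

	create, remove := diffTags(oldTags, newTags)
	fmt.Println("remove:", remove) // [env role] in some order: changed value, dropped key
	fmt.Println("create:", create) // map[env:production name:name-env]
}

Because setTagsEMR calls RemoveTags before AddTags, a tag whose value changed is deleted and then immediately re-created rather than updated in place.
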
diff --git a/builtin/providers/aws/resource_aws_emr_cluster_test.go b/builtin/providers/aws/resource_aws_emr_cluster_test.go
index b125afc7e..0521bf1de 100644
--- a/builtin/providers/aws/resource_aws_emr_cluster_test.go
+++ b/builtin/providers/aws/resource_aws_emr_cluster_test.go
@@ -29,6 +29,45 @@ func TestAccAWSEMRCluster_basic(t *testing.T) {
 	})
 }
 
+func TestAccAWSEMRCluster_tags(t *testing.T) {
+	var jobFlow emr.RunJobFlowOutput
+	r := acctest.RandInt()
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSEmrDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAWSEmrClusterConfig(r),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSEmrClusterExists("aws_emr_cluster.tf-test-cluster", &jobFlow),
+					resource.TestCheckResourceAttr("aws_emr_cluster.tf-test-cluster", "tags.%", "4"),
+					resource.TestCheckResourceAttr(
+						"aws_emr_cluster.tf-test-cluster", "tags.role", "rolename"),
+					resource.TestCheckResourceAttr(
+						"aws_emr_cluster.tf-test-cluster", "tags.dns_zone", "env_zone"),
+					resource.TestCheckResourceAttr(
+						"aws_emr_cluster.tf-test-cluster", "tags.env", "env"),
+					resource.TestCheckResourceAttr(
+						"aws_emr_cluster.tf-test-cluster", "tags.name", "name-env")),
+			},
+			{
+				Config: testAccAWSEmrClusterConfigUpdatedTags(r),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSEmrClusterExists("aws_emr_cluster.tf-test-cluster", &jobFlow),
+					resource.TestCheckResourceAttr("aws_emr_cluster.tf-test-cluster", "tags.%", "3"),
+					resource.TestCheckResourceAttr(
+						"aws_emr_cluster.tf-test-cluster", "tags.dns_zone", "new_zone"),
+					resource.TestCheckResourceAttr(
+						"aws_emr_cluster.tf-test-cluster", "tags.Env", "production"),
+					resource.TestCheckResourceAttr(
+						"aws_emr_cluster.tf-test-cluster", "tags.name", "name-env"),
+				),
+			},
+		},
+	})
+}
+
 func testAccCheckAWSEmrDestroy(s *terraform.State) error {
 	conn := testAccProvider.Meta().(*AWSClient).emrconn
 
@@ -121,8 +160,290 @@ resource "aws_emr_cluster" "tf-test-cluster" {
     name     = "name-env"
   }
 
-  keep_job_flow_alive_when_no_steps = true
-  termination_protection = false
+  keep_job_flow_alive_when_no_steps = true
+  termination_protection            = false
+
+  bootstrap_action {
+    path = "s3://elasticmapreduce/bootstrap-actions/run-if"
+    name = "runif"
+    args = ["instance.isMaster=true", "echo running on master node"]
+  }
+
+  configurations = "test-fixtures/emr_configurations.json"
+
+  depends_on = ["aws_main_route_table_association.a"]
+
+  service_role = "${aws_iam_role.iam_emr_default_role.arn}"
+}
+
+resource "aws_security_group" "allow_all" {
+  name        = "allow_all"
+  description = "Allow all inbound traffic"
+  vpc_id      = "${aws_vpc.main.id}"
+
+  ingress {
+    from_port   = 0
+    to_port     = 0
+    protocol    = "-1"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  egress {
+    from_port   = 0
+    to_port     = 0
+    protocol    = "-1"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  depends_on = ["aws_subnet.main"]
+
+  lifecycle {
+    ignore_changes = ["ingress", "egress"]
+  }
+
+  tags {
+    name = "emr_test"
+  }
+}
+
+resource "aws_vpc" "main" {
+  cidr_block           = "168.31.0.0/16"
+  enable_dns_hostnames = true
+
+  tags {
+    name = "emr_test"
+  }
+}
+
+resource "aws_subnet" "main" {
+  vpc_id     = "${aws_vpc.main.id}"
+  cidr_block = "168.31.0.0/20"
+
+  tags {
+    name = "emr_test"
+  }
+}
+
+resource "aws_internet_gateway" "gw" {
+  vpc_id = "${aws_vpc.main.id}"
+}
+
+resource "aws_route_table" "r" {
+  vpc_id = "${aws_vpc.main.id}"
+
+  route {
+    cidr_block = "0.0.0.0/0"
+    gateway_id = "${aws_internet_gateway.gw.id}"
+  }
+}
+
+resource "aws_main_route_table_association" "a" {
+  vpc_id         = "${aws_vpc.main.id}"
+  route_table_id = "${aws_route_table.r.id}"
+}
+
+###
+
+# IAM things
+
+###
+
+# IAM role for EMR Service
+resource "aws_iam_role" "iam_emr_default_role" {
+  name = "iam_emr_default_role_%d"
+
+  assume_role_policy = <