From 7dd15469a5aa97a973f0a5590a7c2ac84cb9373c Mon Sep 17 00:00:00 2001 From: stack72 Date: Fri, 30 Oct 2015 23:55:00 +0000 Subject: [PATCH 1/6] Adding the ability to specify a snapshot window and retention limit for Redis ElastiCache clusters --- .../aws/resource_aws_elasticache_cluster.go | 37 ++++++++++++ .../resource_aws_elasticache_cluster_test.go | 58 +++++++++++++++++++ .../aws/r/elasticache_cluster.html.markdown | 9 +++ 3 files changed, 104 insertions(+) diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster.go b/builtin/providers/aws/resource_aws_elasticache_cluster.go index 3460fb292..dde2cd5e3 100644 --- a/builtin/providers/aws/resource_aws_elasticache_cluster.go +++ b/builtin/providers/aws/resource_aws_elasticache_cluster.go @@ -138,6 +138,24 @@ func resourceAwsElasticacheCluster() *schema.Resource { }, }, + "snapshot_window": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "snapshot_retention_limit": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + value := v.(int) + if value > 35 { + es = append(es, fmt.Errorf( + "snapshot retention limit cannot be more than 35 days")) + } + return + }, + }, + "tags": tagsSchema(), // apply_immediately is used to determine when the update modifications @@ -187,6 +205,14 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{ req.CacheParameterGroupName = aws.String(v.(string)) } + if v, ok := d.GetOk("snapshot_retention_limit"); ok { + req.SnapshotRetentionLimit = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("snapshot_window"); ok { + req.SnapshotWindow = aws.String(v.(string)) + } + if v, ok := d.GetOk("maintenance_window"); ok { req.PreferredMaintenanceWindow = aws.String(v.(string)) } @@ -261,6 +287,8 @@ func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{}) d.Set("security_group_ids", c.SecurityGroups) d.Set("parameter_group_name", 
c.CacheParameterGroup) d.Set("maintenance_window", c.PreferredMaintenanceWindow) + d.Set("snapshot_window", c.SnapshotWindow) + d.Set("snapshot_retention_limit", c.SnapshotRetentionLimit) if c.NotificationConfiguration != nil { if *c.NotificationConfiguration.TopicStatus == "active" { d.Set("notification_topic_arn", c.NotificationConfiguration.TopicArn) @@ -344,6 +372,15 @@ func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{ requestUpdate = true } + if d.HasChange("snapshot_window") { + req.EngineVersion = aws.String(d.Get("snapshot_window").(string)) + requestUpdate = true + } + + if d.HasChange("snapshot_retention_limit") { + req.NumCacheNodes = aws.Int64(int64(d.Get("snapshot_retention_limit").(int))) + } + if d.HasChange("num_cache_nodes") { req.NumCacheNodes = aws.Int64(int64(d.Get("num_cache_nodes").(int))) requestUpdate = true diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go index b93060028..d084224fb 100644 --- a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go +++ b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go @@ -33,6 +33,28 @@ func TestAccAWSElasticacheCluster_basic(t *testing.T) { }) } +func TestAccAWSElasticacheCluster_snapshots(t *testing.T) { + var ec elasticache.CacheCluster + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSElasticacheClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSElasticacheClusterConfig_snapshots, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"), + testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec), + resource.TestCheckResourceAttr( + "aws_elasticache_cluster.bar", "snapshot_window", "05:00-09:00"), + resource.TestCheckResourceAttr( + 
"aws_elasticache_cluster.bar", "snapshot_retention_limit", "3"), + ), + }, + }, + }) +} + func TestAccAWSElasticacheCluster_vpc(t *testing.T) { var csg elasticache.CacheSubnetGroup var ec elasticache.CacheCluster @@ -149,6 +171,42 @@ resource "aws_elasticache_cluster" "bar" { port = 11211 parameter_group_name = "default.memcached1.4" security_group_names = ["${aws_elasticache_security_group.bar.name}"] + snapshot_window = "05:00-09:00" + snapshot_retention_limit = 3 +} +`, genRandInt(), genRandInt(), genRandInt()) + +var testAccAWSElasticacheClusterConfig_snapshots = fmt.Sprintf(` +provider "aws" { + region = "us-east-1" +} +resource "aws_security_group" "bar" { + name = "tf-test-security-group-%03d" + description = "tf-test-security-group-descr" + ingress { + from_port = -1 + to_port = -1 + protocol = "icmp" + cidr_blocks = ["0.0.0.0/0"] + } +} + +resource "aws_elasticache_security_group" "bar" { + name = "tf-test-security-group-%03d" + description = "tf-test-security-group-descr" + security_group_names = ["${aws_security_group.bar.name}"] +} + +resource "aws_elasticache_cluster" "bar" { + cluster_id = "tf-test-%03d" + engine = "redis" + node_type = "cache.m1.small" + num_cache_nodes = 1 + port = 6379 + parameter_group_name = "default.redis2.8" + security_group_names = ["${aws_elasticache_security_group.bar.name}"] + snapshot_window = "05:00-09:00" + snapshot_retention_limit = 3 } `, genRandInt(), genRandInt(), genRandInt()) diff --git a/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown b/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown index ef1d69ed4..4a4cb4d76 100644 --- a/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown +++ b/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown @@ -73,6 +73,15 @@ names to associate with this cache cluster Amazon Resource Name (ARN) of a Redis RDB snapshot file stored in Amazon S3. 
Example: `arn:aws:s3:::my_bucket/snapshot1.rdb` +* `snapshot_window` - (Optional) The daily time range (in UTC) during which ElastiCache will +begin taking a daily snapshot of your cache cluster. Can only be used for the Redis engine. Example: 05:00-09:00 + +* `snapshot_retention_limit` - (Optional) The number of days for which ElastiCache will +retain automatic cache cluster snapshots before deleting them. For example, if you set +SnapshotRetentionLimit to 5, then a snapshot that was taken today will be retained for 5 days +before being deleted. If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off. +Can only be used for the Redis engine. + * `notification_topic_arn` – (Optional) An Amazon Resource Name (ARN) of an SNS topic to send ElastiCache notifications to. Example: `arn:aws:sns:us-east-1:012345678999:my_sns_topic` From 4f05df6cad9773c2bda92bae8def4017ec3d1041 Mon Sep 17 00:00:00 2001 From: stack72 Date: Mon, 2 Nov 2015 20:57:04 +0000 Subject: [PATCH 2/6] When I was setting the update parameters for the Snapshotting, I didn't update the copy/pasted params --- builtin/providers/aws/resource_aws_elasticache_cluster.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster.go b/builtin/providers/aws/resource_aws_elasticache_cluster.go index dde2cd5e3..a33321c3b 100644 --- a/builtin/providers/aws/resource_aws_elasticache_cluster.go +++ b/builtin/providers/aws/resource_aws_elasticache_cluster.go @@ -373,12 +373,11 @@ func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{ } if d.HasChange("snapshot_window") { - req.EngineVersion = aws.String(d.Get("snapshot_window").(string)) - requestUpdate = true + req.SnapshotWindow = aws.String(d.Get("snapshot_window").(string)) } if d.HasChange("snapshot_retention_limit") { - req.NumCacheNodes = aws.Int64(int64(d.Get("snapshot_retention_limit").(int))) + req.SnapshotRetentionLimit = 
aws.Int64(int64(d.Get("snapshot_retention_limit").(int))) } if d.HasChange("num_cache_nodes") { From 707bfd739aa2bdd9748896e3cbc5b6e02d1e1077 Mon Sep 17 00:00:00 2001 From: stack72 Date: Tue, 3 Nov 2015 12:35:06 +0000 Subject: [PATCH 3/6] Added an extra test for the Elasticache Cluster to show that updates work. Also added some debugging to show that the API returns the Elasticache retention period info --- .../aws/resource_aws_elasticache_cluster.go | 2 + .../resource_aws_elasticache_cluster_test.go | 67 +++++++++++++++++++ 2 files changed, 69 insertions(+) diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster.go b/builtin/providers/aws/resource_aws_elasticache_cluster.go index a33321c3b..a2c312d35 100644 --- a/builtin/providers/aws/resource_aws_elasticache_cluster.go +++ b/builtin/providers/aws/resource_aws_elasticache_cluster.go @@ -287,7 +287,9 @@ func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{}) d.Set("security_group_ids", c.SecurityGroups) d.Set("parameter_group_name", c.CacheParameterGroup) d.Set("maintenance_window", c.PreferredMaintenanceWindow) + log.Printf("[INFO] Found %s as the Snapshow Window", *c.SnapshotWindow) d.Set("snapshot_window", c.SnapshotWindow) + log.Printf("[INFO] Found %d as the Snapshow Retention Limit", *c.SnapshotRetentionLimit) d.Set("snapshot_retention_limit", c.SnapshotRetentionLimit) if c.NotificationConfiguration != nil { if *c.NotificationConfiguration.TopicStatus == "active" { diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go index d084224fb..666be2c8a 100644 --- a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go +++ b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go @@ -54,6 +54,39 @@ func TestAccAWSElasticacheCluster_snapshots(t *testing.T) { }, }) } +func TestAccAWSElasticacheCluster_snapshotsWithUpdates(t *testing.T) { + var ec elasticache.CacheCluster + 
resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSElasticacheClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSElasticacheClusterConfig_snapshots, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"), + testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec), + resource.TestCheckResourceAttr( + "aws_elasticache_cluster.bar", "snapshot_window", "05:00-09:00"), + resource.TestCheckResourceAttr( + "aws_elasticache_cluster.bar", "snapshot_retention_limit", "3"), + ), + }, + + resource.TestStep{ + Config: testAccAWSElasticacheClusterConfig_snapshotsUpdated, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"), + testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec), + resource.TestCheckResourceAttr( + "aws_elasticache_cluster.bar", "snapshot_window", "07:00-09:00"), + resource.TestCheckResourceAttr( + "aws_elasticache_cluster.bar", "snapshot_retention_limit", "7"), + ), + }, + }, + }) +} func TestAccAWSElasticacheCluster_vpc(t *testing.T) { var csg elasticache.CacheSubnetGroup @@ -210,6 +243,40 @@ resource "aws_elasticache_cluster" "bar" { } `, genRandInt(), genRandInt(), genRandInt()) +var testAccAWSElasticacheClusterConfig_snapshotsUpdated = fmt.Sprintf(` +provider "aws" { + region = "us-east-1" +} +resource "aws_security_group" "bar" { + name = "tf-test-security-group-%03d" + description = "tf-test-security-group-descr" + ingress { + from_port = -1 + to_port = -1 + protocol = "icmp" + cidr_blocks = ["0.0.0.0/0"] + } +} + +resource "aws_elasticache_security_group" "bar" { + name = "tf-test-security-group-%03d" + description = "tf-test-security-group-descr" + security_group_names = ["${aws_security_group.bar.name}"] +} + +resource "aws_elasticache_cluster" 
"bar" { + cluster_id = "tf-test-%03d" + engine = "redis" + node_type = "cache.m1.small" + num_cache_nodes = 1 + port = 6379 + parameter_group_name = "default.redis2.8" + security_group_names = ["${aws_elasticache_security_group.bar.name}"] + snapshot_window = "07:00-09:00" + snapshot_retention_limit = 7 +} +`, genRandInt(), genRandInt(), genRandInt()) + var testAccAWSElasticacheClusterInVPCConfig = fmt.Sprintf(` resource "aws_vpc" "foo" { cidr_block = "192.168.0.0/16" From ca2ea80af36ddd1907c0daa8bcd74e4985301145 Mon Sep 17 00:00:00 2001 From: stack72 Date: Thu, 5 Nov 2015 12:23:07 +0000 Subject: [PATCH 4/6] Making the changes to the snapshotting for Elasticache Redis as per @catsby's findings --- .../aws/resource_aws_elasticache_cluster.go | 37 +++++++++++-------- .../resource_aws_elasticache_cluster_test.go | 34 ++++++++++------- 2 files changed, 43 insertions(+), 28 deletions(-) diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster.go b/builtin/providers/aws/resource_aws_elasticache_cluster.go index a2c312d35..69777a866 100644 --- a/builtin/providers/aws/resource_aws_elasticache_cluster.go +++ b/builtin/providers/aws/resource_aws_elasticache_cluster.go @@ -205,12 +205,14 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{ req.CacheParameterGroupName = aws.String(v.(string)) } - if v, ok := d.GetOk("snapshot_retention_limit"); ok { - req.SnapshotRetentionLimit = aws.Int64(int64(v.(int))) - } + if !strings.Contains(d.Get("node_type").(string), "cache.t2") { + if v, ok := d.GetOk("snapshot_retention_limit"); ok { + req.SnapshotRetentionLimit = aws.Int64(int64(v.(int))) + } - if v, ok := d.GetOk("snapshot_window"); ok { - req.SnapshotWindow = aws.String(v.(string)) + if v, ok := d.GetOk("snapshot_window"); ok { + req.SnapshotWindow = aws.String(v.(string)) + } } if v, ok := d.GetOk("maintenance_window"); ok { @@ -287,10 +289,12 @@ func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{}) 
d.Set("security_group_ids", c.SecurityGroups) d.Set("parameter_group_name", c.CacheParameterGroup) d.Set("maintenance_window", c.PreferredMaintenanceWindow) - log.Printf("[INFO] Found %s as the Snapshow Window", *c.SnapshotWindow) - d.Set("snapshot_window", c.SnapshotWindow) - log.Printf("[INFO] Found %d as the Snapshow Retention Limit", *c.SnapshotRetentionLimit) - d.Set("snapshot_retention_limit", c.SnapshotRetentionLimit) + if c.SnapshotWindow != nil { + d.Set("snapshot_window", c.SnapshotWindow) + } + if c.SnapshotRetentionLimit != nil { + d.Set("snapshot_retention_limit", c.SnapshotRetentionLimit) + } if c.NotificationConfiguration != nil { if *c.NotificationConfiguration.TopicStatus == "active" { d.Set("notification_topic_arn", c.NotificationConfiguration.TopicArn) @@ -373,13 +377,16 @@ func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{ req.EngineVersion = aws.String(d.Get("engine_version").(string)) requestUpdate = true } + if !strings.Contains(d.Get("node_type").(string), "cache.t2") { + if d.HasChange("snapshot_window") { + req.SnapshotWindow = aws.String(d.Get("snapshot_window").(string)) + requestUpdate = true + } - if d.HasChange("snapshot_window") { - req.SnapshotWindow = aws.String(d.Get("snapshot_window").(string)) - } - - if d.HasChange("snapshot_retention_limit") { - req.SnapshotRetentionLimit = aws.Int64(int64(d.Get("snapshot_retention_limit").(int))) + if d.HasChange("snapshot_retention_limit") { + req.SnapshotRetentionLimit = aws.Int64(int64(d.Get("snapshot_retention_limit").(int))) + requestUpdate = true + } } if d.HasChange("num_cache_nodes") { diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go index 666be2c8a..78e28763d 100644 --- a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go +++ b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go @@ -35,13 +35,17 @@ func TestAccAWSElasticacheCluster_basic(t 
*testing.T) { func TestAccAWSElasticacheCluster_snapshots(t *testing.T) { var ec elasticache.CacheCluster + + ri := genRandInt() + config := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshots, ri) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSElasticacheClusterDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccAWSElasticacheClusterConfig_snapshots, + Config: config, Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"), testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec), @@ -56,13 +60,18 @@ func TestAccAWSElasticacheCluster_snapshots(t *testing.T) { } func TestAccAWSElasticacheCluster_snapshotsWithUpdates(t *testing.T) { var ec elasticache.CacheCluster + + ri := genRandInt() + preConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshots, ri) + postConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshotsUpdated, ri) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSElasticacheClusterDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccAWSElasticacheClusterConfig_snapshots, + Config: preConfig, Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"), testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec), @@ -74,7 +83,7 @@ func TestAccAWSElasticacheCluster_snapshotsWithUpdates(t *testing.T) { }, resource.TestStep{ - Config: testAccAWSElasticacheClusterConfig_snapshotsUpdated, + Config: postConfig, Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"), testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec), @@ -204,17 +213,15 @@ resource "aws_elasticache_cluster" 
"bar" { port = 11211 parameter_group_name = "default.memcached1.4" security_group_names = ["${aws_elasticache_security_group.bar.name}"] - snapshot_window = "05:00-09:00" - snapshot_retention_limit = 3 } `, genRandInt(), genRandInt(), genRandInt()) -var testAccAWSElasticacheClusterConfig_snapshots = fmt.Sprintf(` +var testAccAWSElasticacheClusterConfig_snapshots = ` provider "aws" { region = "us-east-1" } resource "aws_security_group" "bar" { - name = "tf-test-security-group-%03d" + name = "tf-test-security-group" description = "tf-test-security-group-descr" ingress { from_port = -1 @@ -225,7 +232,7 @@ resource "aws_security_group" "bar" { } resource "aws_elasticache_security_group" "bar" { - name = "tf-test-security-group-%03d" + name = "tf-test-security-group" description = "tf-test-security-group-descr" security_group_names = ["${aws_security_group.bar.name}"] } @@ -241,14 +248,14 @@ resource "aws_elasticache_cluster" "bar" { snapshot_window = "05:00-09:00" snapshot_retention_limit = 3 } -`, genRandInt(), genRandInt(), genRandInt()) +` -var testAccAWSElasticacheClusterConfig_snapshotsUpdated = fmt.Sprintf(` +var testAccAWSElasticacheClusterConfig_snapshotsUpdated = ` provider "aws" { region = "us-east-1" } resource "aws_security_group" "bar" { - name = "tf-test-security-group-%03d" + name = "tf-test-security-group" description = "tf-test-security-group-descr" ingress { from_port = -1 @@ -259,7 +266,7 @@ resource "aws_security_group" "bar" { } resource "aws_elasticache_security_group" "bar" { - name = "tf-test-security-group-%03d" + name = "tf-test-security-group" description = "tf-test-security-group-descr" security_group_names = ["${aws_security_group.bar.name}"] } @@ -274,8 +281,9 @@ resource "aws_elasticache_cluster" "bar" { security_group_names = ["${aws_elasticache_security_group.bar.name}"] snapshot_window = "07:00-09:00" snapshot_retention_limit = 7 + apply_immediately = true } -`, genRandInt(), genRandInt(), genRandInt()) +` var 
testAccAWSElasticacheClusterInVPCConfig = fmt.Sprintf(` resource "aws_vpc" "foo" { cidr_block = "192.168.0.0/16" From 350f91ec063b0057ea7fd37ef87227ca6225ad3a Mon Sep 17 00:00:00 2001 From: stack72 Date: Fri, 6 Nov 2015 11:16:51 +0000 Subject: [PATCH 5/6] Removing the instance_type check in the ElastiCache cluster creation. We now allow the error to bubble up to the user when the wrong instance type is used. The limitation for t2 instance types not allowing snapshotting is also now documented --- .../aws/resource_aws_elasticache_cluster.go | 37 ++++++++----------- .../resource_aws_elasticache_cluster_test.go | 35 +++--------------- .../aws/r/elasticache_cluster.html.markdown | 1 + 3 files changed, 21 insertions(+), 52 deletions(-) diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster.go b/builtin/providers/aws/resource_aws_elasticache_cluster.go index 69777a866..6f178b71e 100644 --- a/builtin/providers/aws/resource_aws_elasticache_cluster.go +++ b/builtin/providers/aws/resource_aws_elasticache_cluster.go @@ -205,14 +205,12 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{ req.CacheParameterGroupName = aws.String(v.(string)) } - if !strings.Contains(d.Get("node_type").(string), "cache.t2") { - if v, ok := d.GetOk("snapshot_retention_limit"); ok { - req.SnapshotRetentionLimit = aws.Int64(int64(v.(int))) - } + if v, ok := d.GetOk("snapshot_retention_limit"); ok { + req.SnapshotRetentionLimit = aws.Int64(int64(v.(int))) + } - if v, ok := d.GetOk("snapshot_window"); ok { - req.SnapshotWindow = aws.String(v.(string)) - } + if v, ok := d.GetOk("snapshot_window"); ok { + req.SnapshotWindow = aws.String(v.(string)) } if v, ok := d.GetOk("maintenance_window"); ok { @@ -289,12 +287,8 @@ func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{}) d.Set("security_group_ids", c.SecurityGroups) d.Set("parameter_group_name", c.CacheParameterGroup) d.Set("maintenance_window", c.PreferredMaintenanceWindow) - if c.SnapshotWindow != nil { - 
d.Set("snapshot_window", c.SnapshotWindow) - } - if c.SnapshotRetentionLimit != nil { - d.Set("snapshot_retention_limit", c.SnapshotRetentionLimit) - } + d.Set("snapshot_window", c.SnapshotWindow) + d.Set("snapshot_retention_limit", c.SnapshotRetentionLimit) if c.NotificationConfiguration != nil { if *c.NotificationConfiguration.TopicStatus == "active" { d.Set("notification_topic_arn", c.NotificationConfiguration.TopicArn) @@ -377,16 +371,15 @@ func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{ req.EngineVersion = aws.String(d.Get("engine_version").(string)) requestUpdate = true } - if !strings.Contains(d.Get("node_type").(string), "cache.t2") { - if d.HasChange("snapshot_window") { - req.SnapshotWindow = aws.String(d.Get("snapshot_window").(string)) - requestUpdate = true - } - if d.HasChange("snapshot_retention_limit") { - req.SnapshotRetentionLimit = aws.Int64(int64(d.Get("snapshot_retention_limit").(int))) - requestUpdate = true - } + if d.HasChange("snapshot_window") { + req.SnapshotWindow = aws.String(d.Get("snapshot_window").(string)) + requestUpdate = true + } + + if d.HasChange("snapshot_retention_limit") { + req.SnapshotRetentionLimit = aws.Int64(int64(d.Get("snapshot_retention_limit").(int))) + requestUpdate = true } if d.HasChange("num_cache_nodes") { diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go index 78e28763d..0620ef47b 100644 --- a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go +++ b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go @@ -33,37 +33,12 @@ func TestAccAWSElasticacheCluster_basic(t *testing.T) { }) } -func TestAccAWSElasticacheCluster_snapshots(t *testing.T) { - var ec elasticache.CacheCluster - - ri := genRandInt() - config := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshots, ri) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: 
testAccProviders, - CheckDestroy: testAccCheckAWSElasticacheClusterDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"), - testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec), - resource.TestCheckResourceAttr( - "aws_elasticache_cluster.bar", "snapshot_window", "05:00-09:00"), - resource.TestCheckResourceAttr( - "aws_elasticache_cluster.bar", "snapshot_retention_limit", "3"), - ), - }, - }, - }) -} func TestAccAWSElasticacheCluster_snapshotsWithUpdates(t *testing.T) { var ec elasticache.CacheCluster ri := genRandInt() - preConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshots, ri) - postConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshotsUpdated, ri) + preConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshots, ri, ri, ri) + postConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshotsUpdated, ri, ri, ri) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -221,7 +196,7 @@ provider "aws" { region = "us-east-1" } resource "aws_security_group" "bar" { - name = "tf-test-security-group" + name = "tf-test-security-group-%03d" description = "tf-test-security-group-descr" ingress { from_port = -1 @@ -232,7 +207,7 @@ resource "aws_security_group" "bar" { } resource "aws_elasticache_security_group" "bar" { - name = "tf-test-security-group" + name = "tf-test-security-group-%03d" description = "tf-test-security-group-descr" security_group_names = ["${aws_security_group.bar.name}"] } @@ -240,7 +215,7 @@ resource "aws_elasticache_security_group" "bar" { resource "aws_elasticache_cluster" "bar" { cluster_id = "tf-test-%03d" engine = "redis" - node_type = "cache.m1.small" + node_type = "cache.t2.small" num_cache_nodes = 1 port = 6379 parameter_group_name = "default.redis2.8" diff --git 
a/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown b/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown index 4a4cb4d76..e39d6172a 100644 --- a/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown +++ b/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown @@ -88,6 +88,7 @@ SNS topic to send ElastiCache notifications to. Example: * `tags` - (Optional) A mapping of tags to assign to the resource. +~> **NOTE:** Snapshotting functionality is not compatible with t2 instance types. ## Attributes Reference From dbd2a43f464b80ab3c3893b7280e87d7105ce92f Mon Sep 17 00:00:00 2001 From: clint shryock Date: Fri, 6 Nov 2015 16:55:04 -0600 Subject: [PATCH 6/6] config updates for ElastiCache test --- .../providers/aws/resource_aws_elasticache_cluster_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go index 0620ef47b..a17c5d9b1 100644 --- a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go +++ b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go @@ -215,7 +215,7 @@ resource "aws_elasticache_security_group" "bar" { resource "aws_elasticache_cluster" "bar" { cluster_id = "tf-test-%03d" engine = "redis" - node_type = "cache.t2.small" + node_type = "cache.m1.small" num_cache_nodes = 1 port = 6379 parameter_group_name = "default.redis2.8" @@ -230,7 +230,7 @@ provider "aws" { region = "us-east-1" } resource "aws_security_group" "bar" { - name = "tf-test-security-group" + name = "tf-test-security-group-%03d" description = "tf-test-security-group-descr" ingress { from_port = -1 @@ -241,7 +241,7 @@ resource "aws_security_group" "bar" { } resource "aws_elasticache_security_group" "bar" { - name = "tf-test-security-group" + name = "tf-test-security-group-%03d" description = "tf-test-security-group-descr" security_group_names = 
["${aws_security_group.bar.name}"] }