diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster.go b/builtin/providers/aws/resource_aws_elasticache_cluster.go
index f2410dcd5..18a4ddf41 100644
--- a/builtin/providers/aws/resource_aws_elasticache_cluster.go
+++ b/builtin/providers/aws/resource_aws_elasticache_cluster.go
@@ -138,6 +138,24 @@ func resourceAwsElasticacheCluster() *schema.Resource {
 				},
 			},
 
+			"snapshot_window": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+			},
+
+			"snapshot_retention_limit": &schema.Schema{
+				Type:     schema.TypeInt,
+				Optional: true,
+				ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
+					value := v.(int)
+					if value > 35 {
+						es = append(es, fmt.Errorf(
+							"snapshot retention limit cannot be more than 35 days"))
+					}
+					return
+				},
+			},
+
 			"tags": tagsSchema(),
 
 			// apply_immediately is used to determine when the update modifications
@@ -187,6 +205,14 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{
 		req.CacheParameterGroupName = aws.String(v.(string))
 	}
 
+	if v, ok := d.GetOk("snapshot_retention_limit"); ok {
+		req.SnapshotRetentionLimit = aws.Int64(int64(v.(int)))
+	}
+
+	if v, ok := d.GetOk("snapshot_window"); ok {
+		req.SnapshotWindow = aws.String(v.(string))
+	}
+
 	if v, ok := d.GetOk("maintenance_window"); ok {
 		req.PreferredMaintenanceWindow = aws.String(v.(string))
 	}
@@ -267,6 +293,8 @@ func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{})
 	d.Set("security_group_ids", c.SecurityGroups)
 	d.Set("parameter_group_name", c.CacheParameterGroup)
 	d.Set("maintenance_window", c.PreferredMaintenanceWindow)
+	d.Set("snapshot_window", c.SnapshotWindow)
+	d.Set("snapshot_retention_limit", c.SnapshotRetentionLimit)
 	if c.NotificationConfiguration != nil {
 		if *c.NotificationConfiguration.TopicStatus == "active" {
 			d.Set("notification_topic_arn", c.NotificationConfiguration.TopicArn)
@@ -350,6 +378,16 @@ func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{
 		requestUpdate = true
 	}
 
+	if d.HasChange("snapshot_window") {
+		req.SnapshotWindow = aws.String(d.Get("snapshot_window").(string))
+		requestUpdate = true
+	}
+
+	if d.HasChange("snapshot_retention_limit") {
+		req.SnapshotRetentionLimit = aws.Int64(int64(d.Get("snapshot_retention_limit").(int)))
+		requestUpdate = true
+	}
+
 	if d.HasChange("num_cache_nodes") {
 		req.NumCacheNodes = aws.Int64(int64(d.Get("num_cache_nodes").(int)))
 		requestUpdate = true
diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go
index b93060028..a17c5d9b1 100644
--- a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go
+++ b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go
@@ -33,6 +33,45 @@ func TestAccAWSElasticacheCluster_basic(t *testing.T) {
 	})
 }
 
+func TestAccAWSElasticacheCluster_snapshotsWithUpdates(t *testing.T) {
+	var ec elasticache.CacheCluster
+
+	ri := genRandInt()
+	preConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshots, ri, ri, ri)
+	postConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshotsUpdated, ri, ri, ri)
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSElasticacheClusterDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: preConfig,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"),
+					testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec),
+					resource.TestCheckResourceAttr(
+						"aws_elasticache_cluster.bar", "snapshot_window", "05:00-09:00"),
+					resource.TestCheckResourceAttr(
+						"aws_elasticache_cluster.bar", "snapshot_retention_limit", "3"),
+				),
+			},
+
+			resource.TestStep{
+				Config: postConfig,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"),
+					testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec),
+					resource.TestCheckResourceAttr(
+						"aws_elasticache_cluster.bar", "snapshot_window", "07:00-09:00"),
+					resource.TestCheckResourceAttr(
+						"aws_elasticache_cluster.bar", "snapshot_retention_limit", "7"),
+				),
+			},
+		},
+	})
+}
+
 func TestAccAWSElasticacheCluster_vpc(t *testing.T) {
 	var csg elasticache.CacheSubnetGroup
 	var ec elasticache.CacheCluster
@@ -152,6 +191,75 @@ resource "aws_elasticache_cluster" "bar" {
 }
 `, genRandInt(), genRandInt(), genRandInt())
 
+var testAccAWSElasticacheClusterConfig_snapshots = `
+provider "aws" {
+    region = "us-east-1"
+}
+resource "aws_security_group" "bar" {
+    name = "tf-test-security-group-%03d"
+    description = "tf-test-security-group-descr"
+    ingress {
+        from_port = -1
+        to_port = -1
+        protocol = "icmp"
+        cidr_blocks = ["0.0.0.0/0"]
+    }
+}
+
+resource "aws_elasticache_security_group" "bar" {
+    name = "tf-test-security-group-%03d"
+    description = "tf-test-security-group-descr"
+    security_group_names = ["${aws_security_group.bar.name}"]
+}
+
+resource "aws_elasticache_cluster" "bar" {
+    cluster_id = "tf-test-%03d"
+    engine = "redis"
+    node_type = "cache.m1.small"
+    num_cache_nodes = 1
+    port = 6379
+    parameter_group_name = "default.redis2.8"
+    security_group_names = ["${aws_elasticache_security_group.bar.name}"]
+    snapshot_window = "05:00-09:00"
+    snapshot_retention_limit = 3
+}
+`
+
+var testAccAWSElasticacheClusterConfig_snapshotsUpdated = `
+provider "aws" {
+    region = "us-east-1"
+}
+resource "aws_security_group" "bar" {
+    name = "tf-test-security-group-%03d"
+    description = "tf-test-security-group-descr"
+    ingress {
+        from_port = -1
+        to_port = -1
+        protocol = "icmp"
+        cidr_blocks = ["0.0.0.0/0"]
+    }
+}
+
+resource "aws_elasticache_security_group" "bar" {
+    name = "tf-test-security-group-%03d"
+    description = "tf-test-security-group-descr"
+    security_group_names = ["${aws_security_group.bar.name}"]
+}
+
+resource "aws_elasticache_cluster" "bar" {
+    cluster_id = "tf-test-%03d"
+    engine = "redis"
+    node_type = "cache.m1.small"
+    num_cache_nodes = 1
+    port = 6379
+    parameter_group_name = "default.redis2.8"
+    security_group_names = ["${aws_elasticache_security_group.bar.name}"]
+    snapshot_window = "07:00-09:00"
+    snapshot_retention_limit = 7
+    apply_immediately = true
+}
+`
+
 var testAccAWSElasticacheClusterInVPCConfig = fmt.Sprintf(`
 resource "aws_vpc" "foo" {
 	cidr_block = "192.168.0.0/16"
diff --git a/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown b/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown
index ef1d69ed4..e39d6172a 100644
--- a/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown
+++ b/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown
@@ -73,12 +73,22 @@ names to associate with this cache cluster
 Amazon Resource Name (ARN) of a Redis RDB snapshot file stored in Amazon S3.
 Example: `arn:aws:s3:::my_bucket/snapshot1.rdb`
 
+* `snapshot_window` - (Optional) The daily time range (in UTC) during which ElastiCache will
+begin taking a daily snapshot of your cache cluster. Can only be used for the Redis engine. Example: `05:00-09:00`
+
+* `snapshot_retention_limit` - (Optional) The number of days for which ElastiCache will
+retain automatic cache cluster snapshots before deleting them. For example, if you set
+SnapshotRetentionLimit to 5, then a snapshot that was taken today will be retained for 5 days
+before being deleted. If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off.
+Can only be used for the Redis engine.
+
 * `notification_topic_arn` – (Optional) An Amazon Resource Name (ARN) of an SNS topic to send ElastiCache notifications to. Example:
 `arn:aws:sns:us-east-1:012345678999:my_sns_topic`
 
 * `tags` - (Optional) A mapping of tags to assign to the resource.
 
+~> **NOTE:** Snapshotting functionality is not compatible with t2 instance types.
 
 ## Attributes Reference
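For reference, the two new arguments are exercised end-to-end by the acceptance-test fixtures in this patch. A minimal configuration sketch follows; the resource name and values are illustrative only, mirroring those fixtures:

```
resource "aws_elasticache_cluster" "example" {
    # Illustrative values, matching the acceptance-test fixtures in this patch.
    cluster_id               = "tf-example-redis"
    engine                   = "redis"            # snapshots can only be used with the Redis engine
    node_type                = "cache.m1.small"   # not a t2 type; t2 does not support snapshotting
    num_cache_nodes          = 1
    port                     = 6379
    parameter_group_name     = "default.redis2.8"
    snapshot_window          = "05:00-09:00"      # daily backup window, UTC
    snapshot_retention_limit = 3                  # keep snapshots for 3 days (0 turns backups off, max 35)
}
```

Per the `ValidateFunc` added to the schema, `snapshot_retention_limit` is rejected at plan time if it exceeds 35 days, and per the documentation above a value of zero disables automatic backups.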