Removing the instance_type check in the ElastiCache cluster creation. We now allow the error to bubble up to the user when the wrong instance type is used. The limitation that t2 instance types do not allow snapshotting is now documented.

stack72 2015-11-06 11:16:51 +00:00
parent ca2ea80af3
commit 350f91ec06
3 changed files with 21 additions and 52 deletions
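
For illustration, the behaviour the commit message describes can be sketched directly against the AWS SDK: with the client-side node_type guard removed, the ElastiCache API itself rejects snapshot settings on t2 nodes, and that rejection is what the user now sees. This is a minimal sketch, not code from this commit; the cluster id is made up and the exact error code AWS returns for the invalid combination is not taken from this diff.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	conn := elasticache.New(session.New())

	// Snapshot settings on a cache.t2 node type: previously the provider silently
	// dropped these fields; now the request is sent as-is and the API's error
	// bubbles up to the user.
	_, err := conn.CreateCacheCluster(&elasticache.CreateCacheClusterInput{
		CacheClusterId:         aws.String("tf-test-snapshots"), // hypothetical id
		Engine:                 aws.String("redis"),
		CacheNodeType:          aws.String("cache.t2.micro"),
		NumCacheNodes:          aws.Int64(1),
		SnapshotRetentionLimit: aws.Int64(3), // not supported on t2 nodes
	})
	if awsErr, ok := err.(awserr.Error); ok {
		// The code/message pair is whatever ElastiCache returns for the invalid
		// combination; surfacing it verbatim is the point of the change.
		log.Fatalf("ElastiCache rejected the request: %s: %s", awsErr.Code(), awsErr.Message())
	}
	fmt.Println("CreateCacheCluster accepted")
}
```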

View File

@@ -205,7 +205,6 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{}) error {
 		req.CacheParameterGroupName = aws.String(v.(string))
 	}
-	if !strings.Contains(d.Get("node_type").(string), "cache.t2") {
 	if v, ok := d.GetOk("snapshot_retention_limit"); ok {
 		req.SnapshotRetentionLimit = aws.Int64(int64(v.(int)))
 	}
@@ -213,7 +212,6 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{}) error {
 	if v, ok := d.GetOk("snapshot_window"); ok {
 		req.SnapshotWindow = aws.String(v.(string))
 	}
-	}
 	if v, ok := d.GetOk("maintenance_window"); ok {
 		req.PreferredMaintenanceWindow = aws.String(v.(string))
@@ -289,12 +287,8 @@ func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{}) error {
 	d.Set("security_group_ids", c.SecurityGroups)
 	d.Set("parameter_group_name", c.CacheParameterGroup)
 	d.Set("maintenance_window", c.PreferredMaintenanceWindow)
-	if c.SnapshotWindow != nil {
 	d.Set("snapshot_window", c.SnapshotWindow)
-	}
-	if c.SnapshotRetentionLimit != nil {
 	d.Set("snapshot_retention_limit", c.SnapshotRetentionLimit)
-	}
 	if c.NotificationConfiguration != nil {
 		if *c.NotificationConfiguration.TopicStatus == "active" {
 			d.Set("notification_topic_arn", c.NotificationConfiguration.TopicArn)
@@ -377,7 +371,7 @@ func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{}) error {
 		req.EngineVersion = aws.String(d.Get("engine_version").(string))
 		requestUpdate = true
 	}
-	if !strings.Contains(d.Get("node_type").(string), "cache.t2") {
 	if d.HasChange("snapshot_window") {
 		req.SnapshotWindow = aws.String(d.Get("snapshot_window").(string))
 		requestUpdate = true
@@ -387,7 +381,6 @@ func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{}) error {
 		req.SnapshotRetentionLimit = aws.Int64(int64(d.Get("snapshot_retention_limit").(int)))
 		requestUpdate = true
 	}
-	}
 	if d.HasChange("num_cache_nodes") {
 		req.NumCacheNodes = aws.Int64(int64(d.Get("num_cache_nodes").(int)))
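
An aside on the read-path hunk above: dropping the nil checks before d.Set rests on the premise (stated here as an assumption, not something this diff proves) that helper/schema normalizes a nil pointer to the field's zero value rather than failing. The aws helper functions follow the same convention, which the toy snippet below shows; all names in it are illustrative.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Stand-ins for c.SnapshotWindow / c.SnapshotRetentionLimit when the cluster
	// has snapshots disabled and the API returns no values.
	var snapshotWindow *string
	var snapshotRetentionLimit *int64

	// Nil pointers collapse to zero values: "" and 0. The read function now
	// relies on the equivalent behaviour inside d.Set instead of guarding each
	// field by hand.
	fmt.Printf("snapshot_window=%q snapshot_retention_limit=%d\n",
		aws.StringValue(snapshotWindow), aws.Int64Value(snapshotRetentionLimit))
}
```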

View File

@@ -33,37 +33,12 @@ func TestAccAWSElasticacheCluster_basic(t *testing.T) {
 	})
 }
-func TestAccAWSElasticacheCluster_snapshots(t *testing.T) {
-	var ec elasticache.CacheCluster
-	ri := genRandInt()
-	config := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshots, ri)
-	resource.Test(t, resource.TestCase{
-		PreCheck:     func() { testAccPreCheck(t) },
-		Providers:    testAccProviders,
-		CheckDestroy: testAccCheckAWSElasticacheClusterDestroy,
-		Steps: []resource.TestStep{
-			resource.TestStep{
-				Config: config,
-				Check: resource.ComposeTestCheckFunc(
-					testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"),
-					testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec),
-					resource.TestCheckResourceAttr(
-						"aws_elasticache_cluster.bar", "snapshot_window", "05:00-09:00"),
-					resource.TestCheckResourceAttr(
-						"aws_elasticache_cluster.bar", "snapshot_retention_limit", "3"),
-				),
-			},
-		},
-	})
-}
 func TestAccAWSElasticacheCluster_snapshotsWithUpdates(t *testing.T) {
 	var ec elasticache.CacheCluster
 	ri := genRandInt()
-	preConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshots, ri)
-	postConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshotsUpdated, ri)
+	preConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshots, ri, ri, ri)
+	postConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshotsUpdated, ri, ri, ri)
 	resource.Test(t, resource.TestCase{
 		PreCheck: func() { testAccPreCheck(t) },
@@ -221,7 +196,7 @@ provider "aws" {
     region = "us-east-1"
 }
 resource "aws_security_group" "bar" {
-    name = "tf-test-security-group"
+    name = "tf-test-security-group-%03d"
     description = "tf-test-security-group-descr"
     ingress {
         from_port = -1
@@ -232,7 +207,7 @@ resource "aws_security_group" "bar" {
 }
 resource "aws_elasticache_security_group" "bar" {
-    name = "tf-test-security-group"
+    name = "tf-test-security-group-%03d"
    description = "tf-test-security-group-descr"
     security_group_names = ["${aws_security_group.bar.name}"]
 }
@@ -240,7 +215,7 @@ resource "aws_elasticache_security_group" "bar" {
 resource "aws_elasticache_cluster" "bar" {
     cluster_id = "tf-test-%03d"
     engine = "redis"
-    node_type = "cache.m1.small"
+    node_type = "cache.t2.small"
     num_cache_nodes = 1
     port = 6379
     parameter_group_name = "default.redis2.8"
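
The config constants referenced by these tests are not shown in full, but the hunks above imply they now carry three %03d placeholders (two security group names plus the cluster id), presumably so every resource name gets the same random suffix and repeated test runs do not collide. A condensed, hypothetical sketch of how those placeholders line up with the three ri arguments:

```go
package main

import "fmt"

// configSketch is a hypothetical, heavily trimmed stand-in for
// testAccAWSElasticacheClusterConfig_snapshots; only the placeholder layout is
// the point here.
const configSketch = `
resource "aws_security_group" "bar" {
    name = "tf-test-security-group-%03d"
}

resource "aws_elasticache_security_group" "bar" {
    name = "tf-test-security-group-%03d"
}

resource "aws_elasticache_cluster" "bar" {
    cluster_id = "tf-test-%03d"
}
`

func main() {
	ri := 42 // stands in for genRandInt() in the acceptance tests
	// fmt.Printf fills the %03d verbs left to right, which is why the tests now
	// pass ri three times.
	fmt.Printf(configSketch, ri, ri, ri)
}
```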

View File

@@ -88,6 +88,7 @@ SNS topic to send ElastiCache notifications to. Example:
 * `tags` - (Optional) A mapping of tags to assign to the resource.
+~> **NOTE:** Snapshotting functionality is not compatible with t2 instance types.
 ## Attributes Reference