provider/aws: Implement the `aws_elasticache_replication_group` resource (#8275)
This commit is contained in:
parent
c61f04c89d
commit
51f216306f
|
@ -211,6 +211,7 @@ func Provider() terraform.ResourceProvider {
|
|||
"aws_eip_association": resourceAwsEipAssociation(),
|
||||
"aws_elasticache_cluster": resourceAwsElasticacheCluster(),
|
||||
"aws_elasticache_parameter_group": resourceAwsElasticacheParameterGroup(),
|
||||
"aws_elasticache_replication_group": resourceAwsElasticacheReplicationGroup(),
|
||||
"aws_elasticache_security_group": resourceAwsElasticacheSecurityGroup(),
|
||||
"aws_elasticache_subnet_group": resourceAwsElasticacheSubnetGroup(),
|
||||
"aws_elastic_beanstalk_application": resourceAwsElasticBeanstalkApplication(),
|
||||
|
|
|
@ -14,184 +14,194 @@ import (
|
|||
"github.com/hashicorp/terraform/helper/schema"
|
||||
)
|
||||
|
||||
func resourceAwsElastiCacheCommonSchema() map[string]*schema.Schema {
|
||||
|
||||
return map[string]*schema.Schema{
|
||||
"availability_zones": &schema.Schema{
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
Set: schema.HashString,
|
||||
},
|
||||
"node_type": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
"engine": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
"engine_version": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
"parameter_group_name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
"subnet_group_name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"security_group_names": &schema.Schema{
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
Set: schema.HashString,
|
||||
},
|
||||
"security_group_ids": &schema.Schema{
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
Set: schema.HashString,
|
||||
},
|
||||
// A single-element string list containing an Amazon Resource Name (ARN) that
|
||||
// uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot
|
||||
// file will be used to populate the node group.
|
||||
//
|
||||
// See also:
|
||||
// https://github.com/aws/aws-sdk-go/blob/4862a174f7fc92fb523fc39e68f00b87d91d2c3d/service/elasticache/api.go#L2079
|
||||
"snapshot_arns": &schema.Schema{
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
Set: schema.HashString,
|
||||
},
|
||||
"snapshot_window": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"maintenance_window": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
StateFunc: func(val interface{}) string {
|
||||
// Elasticache always changes the maintenance
|
||||
// to lowercase
|
||||
return strings.ToLower(val.(string))
|
||||
},
|
||||
},
|
||||
"port": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"notification_topic_arn": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"snapshot_retention_limit": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
|
||||
value := v.(int)
|
||||
if value > 35 {
|
||||
es = append(es, fmt.Errorf(
|
||||
"snapshot retention limit cannot be more than 35 days"))
|
||||
}
|
||||
return
|
||||
},
|
||||
},
|
||||
|
||||
"apply_immediately": &schema.Schema{
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"tags": tagsSchema(),
|
||||
}
|
||||
}
|
||||
|
||||
func resourceAwsElasticacheCluster() *schema.Resource {
|
||||
resourceSchema := resourceAwsElastiCacheCommonSchema()
|
||||
|
||||
resourceSchema["cluster_id"] = &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
StateFunc: func(val interface{}) string {
|
||||
// Elasticache normalizes cluster ids to lowercase,
|
||||
// so we have to do this too or else we can end up
|
||||
// with non-converging diffs.
|
||||
return strings.ToLower(val.(string))
|
||||
},
|
||||
ValidateFunc: validateElastiCacheClusterId,
|
||||
}
|
||||
|
||||
resourceSchema["num_cache_nodes"] = &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
}
|
||||
|
||||
resourceSchema["az_mode"] = &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
}
|
||||
|
||||
resourceSchema["availability_zone"] = &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
}
|
||||
|
||||
resourceSchema["configuration_endpoint"] = &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
}
|
||||
|
||||
resourceSchema["replication_group_id"] = &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
}
|
||||
|
||||
resourceSchema["cache_nodes"] = &schema.Schema{
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"address": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"port": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"availability_zone": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return &schema.Resource{
|
||||
Create: resourceAwsElasticacheClusterCreate,
|
||||
Read: resourceAwsElasticacheClusterRead,
|
||||
Update: resourceAwsElasticacheClusterUpdate,
|
||||
Delete: resourceAwsElasticacheClusterDelete,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"cluster_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
StateFunc: func(val interface{}) string {
|
||||
// Elasticache normalizes cluster ids to lowercase,
|
||||
// so we have to do this too or else we can end up
|
||||
// with non-converging diffs.
|
||||
return strings.ToLower(val.(string))
|
||||
},
|
||||
ValidateFunc: validateElastiCacheClusterId,
|
||||
},
|
||||
"configuration_endpoint": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"engine": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
"node_type": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"num_cache_nodes": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
},
|
||||
"parameter_group_name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
"port": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"engine_version": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
"maintenance_window": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
StateFunc: func(val interface{}) string {
|
||||
// Elasticache always changes the maintenance
|
||||
// to lowercase
|
||||
return strings.ToLower(val.(string))
|
||||
},
|
||||
},
|
||||
"subnet_group_name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"security_group_names": &schema.Schema{
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
Set: schema.HashString,
|
||||
},
|
||||
"security_group_ids": &schema.Schema{
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
Set: schema.HashString,
|
||||
},
|
||||
// Exported Attributes
|
||||
"cache_nodes": &schema.Schema{
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"address": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"port": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
"availability_zone": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"notification_topic_arn": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
// A single-element string list containing an Amazon Resource Name (ARN) that
|
||||
// uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot
|
||||
// file will be used to populate the node group.
|
||||
//
|
||||
// See also:
|
||||
// https://github.com/aws/aws-sdk-go/blob/4862a174f7fc92fb523fc39e68f00b87d91d2c3d/service/elasticache/api.go#L2079
|
||||
"snapshot_arns": &schema.Schema{
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
Set: schema.HashString,
|
||||
},
|
||||
|
||||
"snapshot_window": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"snapshot_retention_limit": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
|
||||
value := v.(int)
|
||||
if value > 35 {
|
||||
es = append(es, fmt.Errorf(
|
||||
"snapshot retention limit cannot be more than 35 days"))
|
||||
}
|
||||
return
|
||||
},
|
||||
},
|
||||
|
||||
"az_mode": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"availability_zone": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"availability_zones": &schema.Schema{
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
Set: schema.HashString,
|
||||
},
|
||||
|
||||
"tags": tagsSchema(),
|
||||
|
||||
// apply_immediately is used to determine when the update modifications
|
||||
// take place.
|
||||
// See http://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_ModifyCacheCluster.html
|
||||
"apply_immediately": &schema.Schema{
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
Schema: resourceSchema,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -267,6 +277,10 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{
|
|||
req.PreferredAvailabilityZones = azs
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("replication_group_id"); ok {
|
||||
req.ReplicationGroupId = aws.String(v.(string))
|
||||
}
|
||||
|
||||
resp, err := conn.CreateCacheCluster(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error creating Elasticache: %s", err)
|
||||
|
@ -283,9 +297,9 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{
|
|||
Pending: pending,
|
||||
Target: []string{"available"},
|
||||
Refresh: cacheClusterStateRefreshFunc(conn, d.Id(), "available", pending),
|
||||
Timeout: 10 * time.Minute,
|
||||
Delay: 10 * time.Second,
|
||||
MinTimeout: 3 * time.Second,
|
||||
Timeout: 40 * time.Minute,
|
||||
MinTimeout: 10 * time.Second,
|
||||
Delay: 30 * time.Second,
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] Waiting for state to become available: %v", d.Id())
|
||||
|
@ -327,6 +341,10 @@ func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{})
|
|||
d.Set("configuration_endpoint", aws.String(fmt.Sprintf("%s:%d", *c.ConfigurationEndpoint.Address, *c.ConfigurationEndpoint.Port)))
|
||||
}
|
||||
|
||||
if c.ReplicationGroupId != nil {
|
||||
d.Set("replication_group_id", c.ReplicationGroupId)
|
||||
}
|
||||
|
||||
d.Set("subnet_group_name", c.CacheSubnetGroupName)
|
||||
d.Set("security_group_names", c.CacheSecurityGroups)
|
||||
d.Set("security_group_ids", c.SecurityGroups)
|
||||
|
@ -423,6 +441,11 @@ func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{
|
|||
requestUpdate = true
|
||||
}
|
||||
|
||||
if d.HasChange("node_type") {
|
||||
req.CacheNodeType = aws.String(d.Get("node_type").(string))
|
||||
requestUpdate = true
|
||||
}
|
||||
|
||||
if d.HasChange("snapshot_retention_limit") {
|
||||
req.SnapshotRetentionLimit = aws.Int64(int64(d.Get("snapshot_retention_limit").(int)))
|
||||
requestUpdate = true
|
||||
|
@ -459,9 +482,9 @@ func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{
|
|||
Pending: pending,
|
||||
Target: []string{"available"},
|
||||
Refresh: cacheClusterStateRefreshFunc(conn, d.Id(), "available", pending),
|
||||
Timeout: 5 * time.Minute,
|
||||
Delay: 5 * time.Second,
|
||||
MinTimeout: 3 * time.Second,
|
||||
Timeout: 80 * time.Minute,
|
||||
MinTimeout: 10 * time.Second,
|
||||
Delay: 30 * time.Second,
|
||||
}
|
||||
|
||||
_, sterr := stateConf.WaitForState()
|
||||
|
@ -530,9 +553,9 @@ func resourceAwsElasticacheClusterDelete(d *schema.ResourceData, meta interface{
|
|||
Pending: []string{"creating", "available", "deleting", "incompatible-parameters", "incompatible-network", "restore-failed"},
|
||||
Target: []string{},
|
||||
Refresh: cacheClusterStateRefreshFunc(conn, d.Id(), "", []string{}),
|
||||
Timeout: 20 * time.Minute,
|
||||
Delay: 10 * time.Second,
|
||||
MinTimeout: 3 * time.Second,
|
||||
Timeout: 40 * time.Minute,
|
||||
MinTimeout: 10 * time.Second,
|
||||
Delay: 30 * time.Second,
|
||||
}
|
||||
|
||||
_, sterr := stateConf.WaitForState()
|
||||
|
|
|
@ -0,0 +1,426 @@
|
|||
package aws
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/service/elasticache"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
)
|
||||
|
||||
func resourceAwsElasticacheReplicationGroup() *schema.Resource {
|
||||
|
||||
resourceSchema := resourceAwsElastiCacheCommonSchema()
|
||||
|
||||
resourceSchema["replication_group_id"] = &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
ValidateFunc: validateAwsElastiCacheReplicationGroupId,
|
||||
}
|
||||
|
||||
resourceSchema["automatic_failover_enabled"] = &schema.Schema{
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Default: false,
|
||||
}
|
||||
|
||||
resourceSchema["replication_group_description"] = &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
}
|
||||
|
||||
resourceSchema["number_cache_clusters"] = &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
}
|
||||
|
||||
resourceSchema["engine"].ValidateFunc = validateAwsElastiCacheReplicationGroupEngine
|
||||
|
||||
return &schema.Resource{
|
||||
Create: resourceAwsElasticacheReplicationGroupCreate,
|
||||
Read: resourceAwsElasticacheReplicationGroupRead,
|
||||
Update: resourceAwsElasticacheReplicationGroupUpdate,
|
||||
Delete: resourceAwsElasticacheReplicationGroupDelete,
|
||||
|
||||
Schema: resourceSchema,
|
||||
}
|
||||
}
|
||||
|
||||
func resourceAwsElasticacheReplicationGroupCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AWSClient).elasticacheconn
|
||||
|
||||
tags := tagsFromMapEC(d.Get("tags").(map[string]interface{}))
|
||||
params := &elasticache.CreateReplicationGroupInput{
|
||||
ReplicationGroupId: aws.String(d.Get("replication_group_id").(string)),
|
||||
ReplicationGroupDescription: aws.String(d.Get("replication_group_description").(string)),
|
||||
AutomaticFailoverEnabled: aws.Bool(d.Get("automatic_failover_enabled").(bool)),
|
||||
CacheNodeType: aws.String(d.Get("node_type").(string)),
|
||||
Engine: aws.String(d.Get("engine").(string)),
|
||||
Port: aws.Int64(int64(d.Get("port").(int))),
|
||||
NumCacheClusters: aws.Int64(int64(d.Get("number_cache_clusters").(int))),
|
||||
Tags: tags,
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("engine_version"); ok {
|
||||
params.EngineVersion = aws.String(v.(string))
|
||||
}
|
||||
|
||||
preferred_azs := d.Get("availability_zones").(*schema.Set).List()
|
||||
if len(preferred_azs) > 0 {
|
||||
azs := expandStringList(preferred_azs)
|
||||
params.PreferredCacheClusterAZs = azs
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("parameter_group_name"); ok {
|
||||
params.CacheParameterGroupName = aws.String(v.(string))
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("subnet_group_name"); ok {
|
||||
params.CacheSubnetGroupName = aws.String(v.(string))
|
||||
}
|
||||
|
||||
security_group_names := d.Get("security_group_names").(*schema.Set).List()
|
||||
if len(security_group_names) > 0 {
|
||||
params.CacheSecurityGroupNames = expandStringList(security_group_names)
|
||||
}
|
||||
|
||||
security_group_ids := d.Get("security_group_ids").(*schema.Set).List()
|
||||
if len(security_group_ids) > 0 {
|
||||
params.SecurityGroupIds = expandStringList(security_group_ids)
|
||||
}
|
||||
|
||||
snaps := d.Get("snapshot_arns").(*schema.Set).List()
|
||||
if len(snaps) > 0 {
|
||||
params.SnapshotArns = expandStringList(snaps)
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("maintenance_window"); ok {
|
||||
params.PreferredMaintenanceWindow = aws.String(v.(string))
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("notification_topic_arn"); ok {
|
||||
params.NotificationTopicArn = aws.String(v.(string))
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("snapshot_retention_limit"); ok {
|
||||
params.SnapshotRetentionLimit = aws.Int64(int64(v.(int)))
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("snapshot_window"); ok {
|
||||
params.SnapshotWindow = aws.String(v.(string))
|
||||
}
|
||||
|
||||
resp, err := conn.CreateReplicationGroup(params)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error creating Elasticache Replication Group: %s", err)
|
||||
}
|
||||
|
||||
d.SetId(*resp.ReplicationGroup.ReplicationGroupId)
|
||||
|
||||
pending := []string{"creating", "modifying"}
|
||||
stateConf := &resource.StateChangeConf{
|
||||
Pending: pending,
|
||||
Target: []string{"available"},
|
||||
Refresh: cacheReplicationGroupStateRefreshFunc(conn, d.Id(), "available", pending),
|
||||
Timeout: 40 * time.Minute,
|
||||
MinTimeout: 10 * time.Second,
|
||||
Delay: 30 * time.Second,
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] Waiting for state to become available: %v", d.Id())
|
||||
_, sterr := stateConf.WaitForState()
|
||||
if sterr != nil {
|
||||
return fmt.Errorf("Error waiting for elasticache replication group (%s) to be created: %s", d.Id(), sterr)
|
||||
}
|
||||
|
||||
return resourceAwsElasticacheReplicationGroupRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceAwsElasticacheReplicationGroupRead(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AWSClient).elasticacheconn
|
||||
req := &elasticache.DescribeReplicationGroupsInput{
|
||||
ReplicationGroupId: aws.String(d.Id()),
|
||||
}
|
||||
|
||||
res, err := conn.DescribeReplicationGroups(req)
|
||||
if err != nil {
|
||||
if eccErr, ok := err.(awserr.Error); ok && eccErr.Code() == "ReplicationGroupNotFoundFault" {
|
||||
log.Printf("[WARN] Elasticache Replication Group (%s) not found", d.Id())
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
var rgp *elasticache.ReplicationGroup
|
||||
for _, r := range res.ReplicationGroups {
|
||||
if *r.ReplicationGroupId == d.Id() {
|
||||
rgp = r
|
||||
}
|
||||
}
|
||||
|
||||
if rgp == nil {
|
||||
log.Printf("[WARN] Replication Group (%s) not found", d.Id())
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
|
||||
if *rgp.Status == "deleting" {
|
||||
log.Printf("[WARN] The Replication Group %q is currently in the `deleting` state", d.Id())
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
|
||||
d.Set("automatic_failover_enabled", rgp.AutomaticFailover)
|
||||
d.Set("replication_group_description", rgp.Description)
|
||||
d.Set("number_cache_clusters", len(rgp.MemberClusters))
|
||||
d.Set("replication_group_id", rgp.ReplicationGroupId)
|
||||
|
||||
if rgp.NodeGroups != nil {
|
||||
cacheCluster := *rgp.NodeGroups[0].NodeGroupMembers[0]
|
||||
|
||||
res, err := conn.DescribeCacheClusters(&elasticache.DescribeCacheClustersInput{
|
||||
CacheClusterId: cacheCluster.CacheClusterId,
|
||||
ShowCacheNodeInfo: aws.Bool(true),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(res.CacheClusters) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
c := res.CacheClusters[0]
|
||||
d.Set("node_type", c.CacheNodeType)
|
||||
d.Set("engine", c.Engine)
|
||||
d.Set("engine_version", c.EngineVersion)
|
||||
d.Set("subnet_group_name", c.CacheSubnetGroupName)
|
||||
d.Set("security_group_names", c.CacheSecurityGroups)
|
||||
d.Set("security_group_ids", c.SecurityGroups)
|
||||
d.Set("parameter_group_name", c.CacheParameterGroup)
|
||||
d.Set("maintenance_window", c.PreferredMaintenanceWindow)
|
||||
d.Set("snapshot_window", c.SnapshotWindow)
|
||||
d.Set("snapshot_retention_limit", c.SnapshotRetentionLimit)
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceAwsElasticacheReplicationGroupUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AWSClient).elasticacheconn
|
||||
|
||||
requestUpdate := false
|
||||
params := &elasticache.ModifyReplicationGroupInput{
|
||||
ApplyImmediately: aws.Bool(d.Get("apply_immediately").(bool)),
|
||||
ReplicationGroupId: aws.String(d.Id()),
|
||||
}
|
||||
|
||||
if d.HasChange("replication_group_description") {
|
||||
params.ReplicationGroupDescription = aws.String(d.Get("replication_group_description").(string))
|
||||
requestUpdate = true
|
||||
}
|
||||
|
||||
if d.HasChange("automatic_failover_enabled") {
|
||||
params.AutomaticFailoverEnabled = aws.Bool(d.Get("automatic_failover_enabled").(bool))
|
||||
requestUpdate = true
|
||||
}
|
||||
|
||||
if d.HasChange("security_group_ids") {
|
||||
if attr := d.Get("security_group_ids").(*schema.Set); attr.Len() > 0 {
|
||||
params.SecurityGroupIds = expandStringList(attr.List())
|
||||
requestUpdate = true
|
||||
}
|
||||
}
|
||||
|
||||
if d.HasChange("security_group_names") {
|
||||
if attr := d.Get("security_group_names").(*schema.Set); attr.Len() > 0 {
|
||||
params.CacheSecurityGroupNames = expandStringList(attr.List())
|
||||
requestUpdate = true
|
||||
}
|
||||
}
|
||||
|
||||
if d.HasChange("preferred_maintenance_window") {
|
||||
params.PreferredMaintenanceWindow = aws.String(d.Get("preferred_maintenance_window").(string))
|
||||
requestUpdate = true
|
||||
}
|
||||
|
||||
if d.HasChange("notification_topic_arn") {
|
||||
params.NotificationTopicArn = aws.String(d.Get("notification_topic_arn").(string))
|
||||
requestUpdate = true
|
||||
}
|
||||
|
||||
if d.HasChange("parameter_group_name") {
|
||||
params.CacheParameterGroupName = aws.String(d.Get("cache_parameter_group_name").(string))
|
||||
requestUpdate = true
|
||||
}
|
||||
|
||||
if d.HasChange("engine_version") {
|
||||
params.EngineVersion = aws.String(d.Get("engine_version").(string))
|
||||
requestUpdate = true
|
||||
}
|
||||
|
||||
if d.HasChange("snapshot_retention_limit") {
|
||||
params.SnapshotRetentionLimit = aws.Int64(int64(d.Get("snapshot_retention_limit").(int)))
|
||||
requestUpdate = true
|
||||
}
|
||||
|
||||
if d.HasChange("snapshot_window") {
|
||||
params.SnapshotWindow = aws.String(d.Get("snapshot_window").(string))
|
||||
requestUpdate = true
|
||||
}
|
||||
|
||||
if d.HasChange("node_type") {
|
||||
params.CacheNodeType = aws.String(d.Get("node_type").(string))
|
||||
requestUpdate = true
|
||||
}
|
||||
|
||||
if requestUpdate {
|
||||
_, err := conn.ModifyReplicationGroup(params)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error updating Elasticache replication group: %s", err)
|
||||
}
|
||||
|
||||
pending := []string{"creating", "modifying", "snapshotting"}
|
||||
stateConf := &resource.StateChangeConf{
|
||||
Pending: pending,
|
||||
Target: []string{"available"},
|
||||
Refresh: cacheReplicationGroupStateRefreshFunc(conn, d.Id(), "available", pending),
|
||||
Timeout: 40 * time.Minute,
|
||||
MinTimeout: 10 * time.Second,
|
||||
Delay: 30 * time.Second,
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] Waiting for state to become available: %v", d.Id())
|
||||
_, sterr := stateConf.WaitForState()
|
||||
if sterr != nil {
|
||||
return fmt.Errorf("Error waiting for elasticache replication group (%s) to be created: %s", d.Id(), sterr)
|
||||
}
|
||||
}
|
||||
return resourceAwsElasticacheReplicationGroupRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceAwsElasticacheReplicationGroupDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AWSClient).elasticacheconn
|
||||
|
||||
req := &elasticache.DeleteReplicationGroupInput{
|
||||
ReplicationGroupId: aws.String(d.Id()),
|
||||
}
|
||||
|
||||
_, err := conn.DeleteReplicationGroup(req)
|
||||
if err != nil {
|
||||
if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "ReplicationGroupNotFoundFault" {
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("Error deleting Elasticache replication group: %s", err)
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] Waiting for deletion: %v", d.Id())
|
||||
stateConf := &resource.StateChangeConf{
|
||||
Pending: []string{"creating", "available", "deleting"},
|
||||
Target: []string{},
|
||||
Refresh: cacheReplicationGroupStateRefreshFunc(conn, d.Id(), "", []string{}),
|
||||
Timeout: 40 * time.Minute,
|
||||
MinTimeout: 10 * time.Second,
|
||||
Delay: 30 * time.Second,
|
||||
}
|
||||
|
||||
_, sterr := stateConf.WaitForState()
|
||||
if sterr != nil {
|
||||
return fmt.Errorf("Error waiting for replication group (%s) to delete: %s", d.Id(), sterr)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func cacheReplicationGroupStateRefreshFunc(conn *elasticache.ElastiCache, replicationGroupId, givenState string, pending []string) resource.StateRefreshFunc {
|
||||
return func() (interface{}, string, error) {
|
||||
resp, err := conn.DescribeReplicationGroups(&elasticache.DescribeReplicationGroupsInput{
|
||||
ReplicationGroupId: aws.String(replicationGroupId),
|
||||
})
|
||||
if err != nil {
|
||||
if eccErr, ok := err.(awserr.Error); ok && eccErr.Code() == "ReplicationGroupNotFoundFault" {
|
||||
log.Printf("[DEBUG] Replication Group Not Found")
|
||||
return nil, "", nil
|
||||
}
|
||||
|
||||
log.Printf("[ERROR] cacheClusterReplicationGroupStateRefreshFunc: %s", err)
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
if len(resp.ReplicationGroups) == 0 {
|
||||
return nil, "", fmt.Errorf("[WARN] Error: no Cache Replication Groups found for id (%s)", replicationGroupId)
|
||||
}
|
||||
|
||||
var rg *elasticache.ReplicationGroup
|
||||
for _, replicationGroup := range resp.ReplicationGroups {
|
||||
if *replicationGroup.ReplicationGroupId == replicationGroupId {
|
||||
log.Printf("[DEBUG] Found matching ElastiCache Replication Group: %s", *replicationGroup.ReplicationGroupId)
|
||||
rg = replicationGroup
|
||||
}
|
||||
}
|
||||
|
||||
if rg == nil {
|
||||
return nil, "", fmt.Errorf("[WARN] Error: no matching ElastiCache Replication Group for id (%s)", replicationGroupId)
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] ElastiCache Replication Group (%s) status: %v", replicationGroupId, *rg.Status)
|
||||
|
||||
// return the current state if it's in the pending array
|
||||
for _, p := range pending {
|
||||
log.Printf("[DEBUG] ElastiCache: checking pending state (%s) for Replication Group (%s), Replication Group status: %s", pending, replicationGroupId, *rg.Status)
|
||||
s := *rg.Status
|
||||
if p == s {
|
||||
log.Printf("[DEBUG] Return with status: %v", *rg.Status)
|
||||
return s, p, nil
|
||||
}
|
||||
}
|
||||
|
||||
return rg, *rg.Status, nil
|
||||
}
|
||||
}
|
||||
|
||||
// validateAwsElastiCacheReplicationGroupEngine checks the "engine" attribute
// for replication groups: only "redis" (matched case-insensitively) is
// accepted, since ElastiCache replication groups do not support memcached.
func validateAwsElastiCacheReplicationGroupEngine(v interface{}, k string) (ws []string, errors []error) {
	engine := strings.ToLower(v.(string))
	if engine != "redis" {
		errors = append(errors, fmt.Errorf("The only acceptable Engine type when using Replication Groups is Redis"))
	}

	return
}
|
||||
|
||||
// validateAwsElastiCacheReplicationGroupId validates the replication_group_id
// attribute: 1-20 characters, alphanumerics and hyphens only, starting with a
// lowercase letter, with no consecutive hyphens and no trailing hyphen. Every
// violated rule contributes its own error.
func validateAwsElastiCacheReplicationGroupId(v interface{}, k string) (ws []string, errors []error) {
	value := v.(string)

	if l := len(value); l < 1 || l > 20 {
		errors = append(errors, fmt.Errorf(
			"%q must contain from 1 to 20 alphanumeric characters or hyphens", k))
	}
	if !regexp.MustCompile(`^[0-9a-zA-Z-]+$`).MatchString(value) {
		errors = append(errors, fmt.Errorf(
			"only alphanumeric characters and hyphens allowed in %q", k))
	}
	if !regexp.MustCompile(`^[a-z]`).MatchString(value) {
		errors = append(errors, fmt.Errorf(
			"first character of %q must be a letter", k))
	}
	if strings.Contains(value, "--") {
		errors = append(errors, fmt.Errorf(
			"%q cannot contain two consecutive hyphens", k))
	}
	if strings.HasSuffix(value, "-") {
		errors = append(errors, fmt.Errorf(
			"%q cannot end with a hyphen", k))
	}
	return
}
|
|
@ -0,0 +1,477 @@
|
|||
package aws
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/service/elasticache"
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestAccAWSElasticacheReplicationGroup_basic(t *testing.T) {
|
||||
var rg elasticache.ReplicationGroup
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccAWSElasticacheReplicationGroupConfig(acctest.RandString(10)),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_elasticache_replication_group.bar", "number_cache_clusters", "2"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// TestAccAWSElasticacheReplicationGroup_updateDescription applies the base
// configuration, then re-applies with only replication_group_description
// changed, verifying the description updates in place (no forced
// replacement: number_cache_clusters stays at 2 in both steps).
func TestAccAWSElasticacheReplicationGroup_updateDescription(t *testing.T) {
	var rg elasticache.ReplicationGroup
	rName := acctest.RandString(10)
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccAWSElasticacheReplicationGroupConfig(rName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg),
					resource.TestCheckResourceAttr(
						"aws_elasticache_replication_group.bar", "number_cache_clusters", "2"),
					resource.TestCheckResourceAttr(
						"aws_elasticache_replication_group.bar", "replication_group_description", "test description"),
				),
			},

			resource.TestStep{
				// Same rName so the same group is updated rather than recreated.
				Config: testAccAWSElasticacheReplicationGroupConfigUpdatedDescription(rName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg),
					resource.TestCheckResourceAttr(
						"aws_elasticache_replication_group.bar", "number_cache_clusters", "2"),
					resource.TestCheckResourceAttr(
						"aws_elasticache_replication_group.bar", "replication_group_description", "updated description"),
				),
			},
		},
	})
}
|
||||
|
||||
// TestAccAWSElasticacheReplicationGroup_updateNodeSize applies the base
// configuration (cache.m1.small nodes), then re-applies with node_type
// bumped to cache.m1.medium, verifying the node size updates without
// changing the cluster count.
func TestAccAWSElasticacheReplicationGroup_updateNodeSize(t *testing.T) {
	var rg elasticache.ReplicationGroup
	rName := acctest.RandString(10)
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccAWSElasticacheReplicationGroupConfig(rName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg),
					resource.TestCheckResourceAttr(
						"aws_elasticache_replication_group.bar", "number_cache_clusters", "2"),
					resource.TestCheckResourceAttr(
						"aws_elasticache_replication_group.bar", "node_type", "cache.m1.small"),
				),
			},

			resource.TestStep{
				// Same rName so the update path (not create) is exercised.
				Config: testAccAWSElasticacheReplicationGroupConfigUpdatedNodeSize(rName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg),
					resource.TestCheckResourceAttr(
						"aws_elasticache_replication_group.bar", "number_cache_clusters", "2"),
					resource.TestCheckResourceAttr(
						"aws_elasticache_replication_group.bar", "node_type", "cache.m1.medium"),
				),
			},
		},
	})
}
|
||||
|
||||
// TestAccAWSElasticacheReplicationGroup_vpc creates a single-node
// replication group inside a VPC (subnet group + VPC security group)
// and verifies the resource exists with number_cache_clusters = 1.
func TestAccAWSElasticacheReplicationGroup_vpc(t *testing.T) {
	var rg elasticache.ReplicationGroup
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccAWSElasticacheReplicationGroupInVPCConfig,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg),
					resource.TestCheckResourceAttr(
						"aws_elasticache_replication_group.bar", "number_cache_clusters", "1"),
				),
			},
		},
	})
}
|
||||
|
||||
// TestAccAWSElasticacheReplicationGroup_multiAzInVpc creates a two-node
// replication group spanning two availability zones inside a VPC and
// verifies that automatic_failover_enabled is reported as true.
func TestAccAWSElasticacheReplicationGroup_multiAzInVpc(t *testing.T) {
	var rg elasticache.ReplicationGroup
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccAWSElasticacheReplicationGroupMultiAZInVPCConfig,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg),
					resource.TestCheckResourceAttr(
						"aws_elasticache_replication_group.bar", "number_cache_clusters", "2"),
					resource.TestCheckResourceAttr(
						"aws_elasticache_replication_group.bar", "automatic_failover_enabled", "true"),
				),
			},
		},
	})
}
|
||||
|
||||
func TestResourceAWSElastiCacheReplicationGroupIdValidation(t *testing.T) {
|
||||
cases := []struct {
|
||||
Value string
|
||||
ErrCount int
|
||||
}{
|
||||
{
|
||||
Value: "tEsting",
|
||||
ErrCount: 0,
|
||||
},
|
||||
{
|
||||
Value: "t.sting",
|
||||
ErrCount: 1,
|
||||
},
|
||||
{
|
||||
Value: "t--sting",
|
||||
ErrCount: 1,
|
||||
},
|
||||
{
|
||||
Value: "1testing",
|
||||
ErrCount: 1,
|
||||
},
|
||||
{
|
||||
Value: "testing-",
|
||||
ErrCount: 1,
|
||||
},
|
||||
{
|
||||
Value: randomString(65),
|
||||
ErrCount: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
_, errors := validateAwsElastiCacheReplicationGroupId(tc.Value, "aws_elasticache_replication_group_replication_group_id")
|
||||
|
||||
if len(errors) != tc.ErrCount {
|
||||
t.Fatalf("Expected the ElastiCache Replication Group Id to trigger a validation error")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourceAWSElastiCacheReplicationGroupEngineValidation(t *testing.T) {
|
||||
cases := []struct {
|
||||
Value string
|
||||
ErrCount int
|
||||
}{
|
||||
{
|
||||
Value: "Redis",
|
||||
ErrCount: 0,
|
||||
},
|
||||
{
|
||||
Value: "REDIS",
|
||||
ErrCount: 0,
|
||||
},
|
||||
{
|
||||
Value: "memcached",
|
||||
ErrCount: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
_, errors := validateAwsElastiCacheReplicationGroupEngine(tc.Value, "aws_elasticache_replication_group_engine")
|
||||
|
||||
if len(errors) != tc.ErrCount {
|
||||
t.Fatalf("Expected the ElastiCache Replication Group Engine to trigger a validation error")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckAWSElasticacheReplicationGroupExists(n string, v *elasticache.ReplicationGroup) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No replication group ID is set")
|
||||
}
|
||||
|
||||
conn := testAccProvider.Meta().(*AWSClient).elasticacheconn
|
||||
res, err := conn.DescribeReplicationGroups(&elasticache.DescribeReplicationGroupsInput{
|
||||
ReplicationGroupId: aws.String(rs.Primary.ID),
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("Elasticache error: %v", err)
|
||||
}
|
||||
|
||||
for _, rg := range res.ReplicationGroups {
|
||||
if *rg.ReplicationGroupId == rs.Primary.ID {
|
||||
*v = *rg
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckAWSElasticacheReplicationDestroy(s *terraform.State) error {
|
||||
conn := testAccProvider.Meta().(*AWSClient).elasticacheconn
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "aws_elasticache_replication_group" {
|
||||
continue
|
||||
}
|
||||
res, err := conn.DescribeReplicationGroups(&elasticache.DescribeReplicationGroupsInput{
|
||||
ReplicationGroupId: aws.String(rs.Primary.ID),
|
||||
})
|
||||
if err != nil {
|
||||
// Verify the error is what we want
|
||||
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ReplicationGroupNotFoundFault" {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
if len(res.ReplicationGroups) > 0 {
|
||||
return fmt.Errorf("still exist.")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// testAccAWSElasticacheReplicationGroupConfig renders the baseline test
// configuration: a classic (EC2-Classic) security group, an ElastiCache
// security group wrapping it, and a two-node cache.m1.small Redis
// replication group. rName is interpolated into the security group names
// and the replication group ID to avoid collisions between test runs.
func testAccAWSElasticacheReplicationGroupConfig(rName string) string {
	return fmt.Sprintf(`
provider "aws" {
	region = "us-east-1"
}
resource "aws_security_group" "bar" {
    name = "tf-test-security-group-%s"
    description = "tf-test-security-group-descr"
    ingress {
        from_port = -1
        to_port = -1
        protocol = "icmp"
        cidr_blocks = ["0.0.0.0/0"]
    }
}

resource "aws_elasticache_security_group" "bar" {
    name = "tf-test-security-group-%s"
    description = "tf-test-security-group-descr"
    security_group_names = ["${aws_security_group.bar.name}"]
}

resource "aws_elasticache_replication_group" "bar" {
    replication_group_id = "tf-%s"
    replication_group_description = "test description"
    engine = "redis"
    node_type = "cache.m1.small"
    number_cache_clusters = 2
    port = 6379
    parameter_group_name = "default.redis2.8"
    security_group_names = ["${aws_elasticache_security_group.bar.name}"]
    apply_immediately = true
}`, rName, rName, rName)
}
|
||||
|
||||
// testAccAWSElasticacheReplicationGroupConfigUpdatedDescription is the
// baseline configuration with only replication_group_description changed
// to "updated description"; applied as a second step to exercise the
// in-place update path.
func testAccAWSElasticacheReplicationGroupConfigUpdatedDescription(rName string) string {
	return fmt.Sprintf(`
provider "aws" {
	region = "us-east-1"
}
resource "aws_security_group" "bar" {
    name = "tf-test-security-group-%s"
    description = "tf-test-security-group-descr"
    ingress {
        from_port = -1
        to_port = -1
        protocol = "icmp"
        cidr_blocks = ["0.0.0.0/0"]
    }
}

resource "aws_elasticache_security_group" "bar" {
    name = "tf-test-security-group-%s"
    description = "tf-test-security-group-descr"
    security_group_names = ["${aws_security_group.bar.name}"]
}

resource "aws_elasticache_replication_group" "bar" {
    replication_group_id = "tf-%s"
    replication_group_description = "updated description"
    engine = "redis"
    node_type = "cache.m1.small"
    number_cache_clusters = 2
    port = 6379
    parameter_group_name = "default.redis2.8"
    security_group_names = ["${aws_elasticache_security_group.bar.name}"]
    apply_immediately = true
}`, rName, rName, rName)
}
|
||||
|
||||
// testAccAWSElasticacheReplicationGroupConfigUpdatedNodeSize is the
// baseline configuration with node_type bumped to cache.m1.medium;
// applied as a second step to exercise a node-size update.
func testAccAWSElasticacheReplicationGroupConfigUpdatedNodeSize(rName string) string {
	return fmt.Sprintf(`
provider "aws" {
	region = "us-east-1"
}
resource "aws_security_group" "bar" {
    name = "tf-test-security-group-%s"
    description = "tf-test-security-group-descr"
    ingress {
        from_port = -1
        to_port = -1
        protocol = "icmp"
        cidr_blocks = ["0.0.0.0/0"]
    }
}

resource "aws_elasticache_security_group" "bar" {
    name = "tf-test-security-group-%s"
    description = "tf-test-security-group-descr"
    security_group_names = ["${aws_security_group.bar.name}"]
}

resource "aws_elasticache_replication_group" "bar" {
    replication_group_id = "tf-%s"
    replication_group_description = "updated description"
    engine = "redis"
    node_type = "cache.m1.medium"
    number_cache_clusters = 2
    port = 6379
    parameter_group_name = "default.redis2.8"
    security_group_names = ["${aws_elasticache_security_group.bar.name}"]
    apply_immediately = true
}`, rName, rName, rName)
}
|
||||
|
||||
// testAccAWSElasticacheReplicationGroupInVPCConfig builds a single-node
// replication group inside a VPC: one subnet in us-west-2a, a cache
// subnet group over it, and a VPC security group referenced by ID.
// Rendered once at package init with random suffixes (two %03d ints for
// the subnet/security group names, one %s string for the group ID).
var testAccAWSElasticacheReplicationGroupInVPCConfig = fmt.Sprintf(`
resource "aws_vpc" "foo" {
    cidr_block = "192.168.0.0/16"
    tags {
        Name = "tf-test"
    }
}

resource "aws_subnet" "foo" {
    vpc_id = "${aws_vpc.foo.id}"
    cidr_block = "192.168.0.0/20"
    availability_zone = "us-west-2a"
    tags {
        Name = "tf-test"
    }
}

resource "aws_elasticache_subnet_group" "bar" {
    name = "tf-test-cache-subnet-%03d"
    description = "tf-test-cache-subnet-group-descr"
    subnet_ids = ["${aws_subnet.foo.id}"]
}

resource "aws_security_group" "bar" {
    name = "tf-test-security-group-%03d"
    description = "tf-test-security-group-descr"
    vpc_id = "${aws_vpc.foo.id}"
    ingress {
        from_port = -1
        to_port = -1
        protocol = "icmp"
        cidr_blocks = ["0.0.0.0/0"]
    }
}

resource "aws_elasticache_replication_group" "bar" {
    replication_group_id = "tf-%s"
    replication_group_description = "test description"
    node_type = "cache.m1.small"
    number_cache_clusters = 1
    engine = "redis"
    port = 6379
    subnet_group_name = "${aws_elasticache_subnet_group.bar.name}"
    security_group_ids = ["${aws_security_group.bar.id}"]
    parameter_group_name = "default.redis2.8"
    availability_zones = ["us-west-2a"]
}

`, acctest.RandInt(), acctest.RandInt(), acctest.RandString(10))
|
||||
|
||||
// testAccAWSElasticacheReplicationGroupMultiAZInVPCConfig builds a
// two-node replication group spanning us-west-2a and us-west-2b inside a
// VPC, with automatic_failover_enabled = true (which requires at least
// two cache clusters). Rendered once at package init with random
// suffixes (four %03d ints for subnet tags and group names, one %s
// string for the replication group ID).
var testAccAWSElasticacheReplicationGroupMultiAZInVPCConfig = fmt.Sprintf(`
resource "aws_vpc" "foo" {
    cidr_block = "192.168.0.0/16"
    tags {
        Name = "tf-test"
    }
}

resource "aws_subnet" "foo" {
    vpc_id = "${aws_vpc.foo.id}"
    cidr_block = "192.168.0.0/20"
    availability_zone = "us-west-2a"
    tags {
        Name = "tf-test-%03d"
    }
}

resource "aws_subnet" "bar" {
    vpc_id = "${aws_vpc.foo.id}"
    cidr_block = "192.168.16.0/20"
    availability_zone = "us-west-2b"
    tags {
        Name = "tf-test-%03d"
    }
}

resource "aws_elasticache_subnet_group" "bar" {
    name = "tf-test-cache-subnet-%03d"
    description = "tf-test-cache-subnet-group-descr"
    subnet_ids = [
        "${aws_subnet.foo.id}",
        "${aws_subnet.bar.id}"
    ]
}

resource "aws_security_group" "bar" {
    name = "tf-test-security-group-%03d"
    description = "tf-test-security-group-descr"
    vpc_id = "${aws_vpc.foo.id}"
    ingress {
        from_port = -1
        to_port = -1
        protocol = "icmp"
        cidr_blocks = ["0.0.0.0/0"]
    }
}

resource "aws_elasticache_replication_group" "bar" {
    replication_group_id = "tf-%s"
    replication_group_description = "test description"
    node_type = "cache.m1.small"
    number_cache_clusters = 2
    engine = "redis"
    port = 6379
    subnet_group_name = "${aws_elasticache_subnet_group.bar.name}"
    security_group_ids = ["${aws_security_group.bar.id}"]
    parameter_group_name = "default.redis2.8"
    availability_zones = ["us-west-2a","us-west-2b"]
    automatic_failover_enabled = true
}
`, acctest.RandInt(), acctest.RandInt(), acctest.RandInt(), acctest.RandInt(), acctest.RandString(10))
|
|
@ -0,0 +1,71 @@
|
|||
---
|
||||
layout: "aws"
|
||||
page_title: "AWS: aws_elasticache_replication_group"
|
||||
sidebar_current: "docs-aws-resource-elasticache-replication-group"
|
||||
description: |-
|
||||
Provides an ElastiCache Replication Group resource.
|
||||
---
|
||||
|
||||
# aws\_elasticache\_replication\_group
|
||||
|
||||
Provides an ElastiCache Replication Group resource.
|
||||
|
||||
~> **Note:** We currently do not support passing a `primary_cluster_id` in order to create the Replication Group.
|
||||
|
||||
## Example Usage
|
||||
|
||||
```
|
||||
resource "aws_elasticache_replication_group" "bar" {
|
||||
replication_group_id = "tf-replication-group-1"
|
||||
replication_group_description = "test description"
|
||||
node_type = "cache.m1.small"
|
||||
number_cache_clusters = 2
|
||||
engine = "redis"
|
||||
port = 6379
|
||||
parameter_group_name = "default.redis2.8"
|
||||
availability_zones = ["us-west-2a", "us-west-2b"]
|
||||
automatic_failover_enabled = true
|
||||
}
|
||||
```
|
||||
|
||||
## Argument Reference
|
||||
|
||||
The following arguments are supported:
|
||||
|
||||
* `replication_group_id` – (Required) The replication group identifier. This parameter is stored as a lowercase string.
|
||||
* `replication_group_description` – (Required) A user-created description for the replication group.
|
||||
* `number_cache_clusters` - (Required) The number of cache clusters this replication group will initially have.
  If Multi-AZ is enabled, the value of this parameter must be at least 2. Changing this number will force a new resource.
|
||||
* `node_type` - (Required) The compute and memory capacity of the nodes in the node group.
|
||||
* `engine` - (Required) The name of the cache engine to be used for the cache clusters in this replication group. The only valid value is Redis.
|
||||
* `automatic_failover_enabled` - (Optional) Specifies whether a read-only replica will be automatically promoted to read/write primary if the existing primary fails. Defaults to `false`.
|
||||
* `availability_zones` - (Optional) A list of EC2 availability zones in which the replication group's cache clusters will be created. The order of the availability zones in the list is not important.
|
||||
* `engine_version` - (Optional) The version number of the cache engine to be used for the cache clusters in this replication group.
|
||||
* `parameter_group_name` - (Optional) The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.
|
||||
* `subnet_group_name` - (Optional) The name of the cache subnet group to be used for the replication group.
|
||||
* `security_group_names` - (Optional) A list of cache security group names to associate with this replication group.
|
||||
* `security_group_ids` - (Optional) One or more Amazon VPC security groups associated with this replication group.
|
||||
* `snapshot_arns` – (Optional) A single-element string list containing an
|
||||
Amazon Resource Name (ARN) of a Redis RDB snapshot file stored in Amazon S3.
|
||||
Example: `arn:aws:s3:::my_bucket/snapshot1.rdb`
|
||||
* `maintenance_window` – (Optional) Specifies the weekly time range for when maintenance
|
||||
on the cache cluster is performed. The format is `ddd:hh24:mi-ddd:hh24:mi` (24H Clock UTC).
|
||||
The minimum maintenance window is a 60 minute period. Example: `sun:05:00-sun:09:00`
|
||||
* `notification_topic_arn` – (Optional) An Amazon Resource Name (ARN) of an
|
||||
SNS topic to send ElastiCache notifications to. Example:
|
||||
`arn:aws:sns:us-east-1:012345678999:my_sns_topic`
|
||||
* `snapshot_window` - (Optional, Redis only) The daily time range (in UTC) during which ElastiCache will
|
||||
begin taking a daily snapshot of your cache cluster. Example: 05:00-09:00
|
||||
* `snapshot_retention_limit` - (Optional, Redis only) The number of days for which ElastiCache will
|
||||
retain automatic cache cluster snapshots before deleting them. For example, if you set
|
||||
SnapshotRetentionLimit to 5, then a snapshot that was taken today will be retained for 5 days
|
||||
before being deleted. If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off.
|
||||
Please note that setting a `snapshot_retention_limit` is not supported on cache.t1.micro or cache.t2.* cache nodes
|
||||
* `apply_immediately` - (Optional) Specifies whether any modifications are applied immediately, or during the next maintenance window. Default is `false`.
|
||||
* `tags` - (Optional) A mapping of tags to assign to the resource
|
||||
|
||||
## Attributes Reference
|
||||
|
||||
The following attributes are exported:
|
||||
|
||||
* `id` - The ID of the ElastiCache Replication Group
|
|
@ -388,6 +388,10 @@
|
|||
<a href="/docs/providers/aws/r/elasticache_parameter_group.html">aws_elasticache_parameter_group</a>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("docs-aws-resource-elasticache-replication-group") %>>
|
||||
<a href="/docs/providers/aws/r/elasticache_replication_group.html">aws_elasticache_replication_group</a>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("docs-aws-resource-elasticache-security-group") %>>
|
||||
<a href="/docs/providers/aws/r/elasticache_security_group.html">aws_elasticache_security_group</a>
|
||||
</li>
|
||||
|
|
Loading…
Reference in New Issue