package aws

import (
	"fmt"
	"log"
	"time"

	"github.com/awslabs/aws-sdk-go/aws"
	"github.com/awslabs/aws-sdk-go/service/elasticache"
	"github.com/hashicorp/terraform/helper/hashcode"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"
)
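
// resourceAwsElasticacheCluster returns the *schema.Resource describing an
// ElastiCache cluster and wires up the Create/Read/Delete handlers defined
// below. Every argument is ForceNew, so any change recreates the cluster.
// The resource is presumably registered in the provider's ResourcesMap (not
// shown in this file), e.g. "aws_elasticache_cluster": resourceAwsElasticacheCluster().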
func resourceAwsElasticacheCluster() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsElasticacheClusterCreate,
		Read:   resourceAwsElasticacheClusterRead,
		Delete: resourceAwsElasticacheClusterDelete,

		Schema: map[string]*schema.Schema{
			"cluster_id": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"engine": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"node_type": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"num_cache_nodes": &schema.Schema{
				Type:     schema.TypeInt,
				Required: true,
				ForceNew: true,
			},
			"parameter_group_name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},
			"port": &schema.Schema{
				Type:     schema.TypeInt,
				Default:  11211,
				Optional: true,
				ForceNew: true,
			},
			"engine_version": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},
			"subnet_group_name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},
			"security_group_names": &schema.Schema{
				Type:     schema.TypeSet,
				Optional: true,
				Computed: true,
				ForceNew: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Set: func(v interface{}) int {
					return hashcode.String(v.(string))
				},
			},
			"security_group_ids": &schema.Schema{
				Type:     schema.TypeSet,
				Optional: true,
				Computed: true,
				ForceNew: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Set: func(v interface{}) int {
					return hashcode.String(v.(string))
				},
			},
		},
	}
}
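
// resourceAwsElasticacheClusterCreate builds a CreateCacheClusterInput from the
// configuration, issues the API call, and then blocks until the cluster reports
// the "available" status.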
func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).elasticacheconn

	clusterId := d.Get("cluster_id").(string)
	nodeType := d.Get("node_type").(string)           // e.g. cache.m1.small
	numNodes := int64(d.Get("num_cache_nodes").(int)) // e.g. 2
	engine := d.Get("engine").(string)                // e.g. memcached
	engineVersion := d.Get("engine_version").(string) // e.g. 1.4.14
	port := int64(d.Get("port").(int))                // e.g. 11211
	subnetGroupName := d.Get("subnet_group_name").(string)
	securityNameSet := d.Get("security_group_names").(*schema.Set)
	securityIdSet := d.Get("security_group_ids").(*schema.Set)
	paramGroupName := d.Get("parameter_group_name").(string) // e.g. default.memcached1.4

	securityNames := expandStringList(securityNameSet.List())
	securityIds := expandStringList(securityIdSet.List())

	req := &elasticache.CreateCacheClusterInput{
		CacheClusterID:          aws.String(clusterId),
		CacheNodeType:           aws.String(nodeType),
		NumCacheNodes:           aws.Long(numNodes),
		Engine:                  aws.String(engine),
		EngineVersion:           aws.String(engineVersion),
		Port:                    aws.Long(port),
		CacheSubnetGroupName:    aws.String(subnetGroupName),
		CacheSecurityGroupNames: securityNames,
		SecurityGroupIDs:        securityIds,
		CacheParameterGroupName: aws.String(paramGroupName),
	}

	_, err := conn.CreateCacheCluster(req)
	if err != nil {
		return fmt.Errorf("Error creating Elasticache: %s", err)
	}

	// Set the ID as soon as the cluster exists so that d.Id() is populated for
	// the state refresh below (and a failed wait still records the resource).
	d.SetId(clusterId)

	pending := []string{"creating"}
	stateConf := &resource.StateChangeConf{
		Pending:    pending,
		Target:     "available",
		Refresh:    CacheClusterStateRefreshFunc(conn, d.Id(), "available", pending),
		Timeout:    10 * time.Minute,
		Delay:      10 * time.Second,
		MinTimeout: 3 * time.Second,
	}

	log.Printf("[DEBUG] Waiting for state to become available: %v", d.Id())
	_, sterr := stateConf.WaitForState()
	if sterr != nil {
		return fmt.Errorf("Error waiting for elasticache (%s) to be created: %s", d.Id(), sterr)
	}

	return nil
}
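
// resourceAwsElasticacheClusterRead looks up the cluster by its ID and mirrors
// the returned attributes into the Terraform state.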
func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).elasticacheconn
	req := &elasticache.DescribeCacheClustersInput{
		CacheClusterID: aws.String(d.Id()),
	}

	res, err := conn.DescribeCacheClusters(req)
	if err != nil {
		return err
	}

	if len(res.CacheClusters) == 1 {
		c := res.CacheClusters[0]
		d.Set("cluster_id", c.CacheClusterID)
		d.Set("node_type", c.CacheNodeType)
		d.Set("num_cache_nodes", c.NumCacheNodes)
		d.Set("engine", c.Engine)
		d.Set("engine_version", c.EngineVersion)
		if c.ConfigurationEndpoint != nil {
			d.Set("port", c.ConfigurationEndpoint.Port)
		}
		d.Set("subnet_group_name", c.CacheSubnetGroupName)
		d.Set("security_group_names", c.CacheSecurityGroups)
		d.Set("security_group_ids", c.SecurityGroups)
		d.Set("parameter_group_name", c.CacheParameterGroup)
	}

	return nil
}
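
// resourceAwsElasticacheClusterDelete issues the delete call and then waits for
// the cluster to disappear from DescribeCacheClusters before clearing the ID.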
func resourceAwsElasticacheClusterDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).elasticacheconn

	req := &elasticache.DeleteCacheClusterInput{
		CacheClusterID: aws.String(d.Id()),
	}
	_, err := conn.DeleteCacheCluster(req)
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] Waiting for deletion: %v", d.Id())
	stateConf := &resource.StateChangeConf{
		Pending:    []string{"creating", "available", "deleting", "incompatible-parameters", "incompatible-network", "restore-failed"},
		Target:     "",
		Refresh:    CacheClusterStateRefreshFunc(conn, d.Id(), "", []string{}),
		Timeout:    10 * time.Minute,
		Delay:      10 * time.Second,
		MinTimeout: 3 * time.Second,
	}

	_, sterr := stateConf.WaitForState()
	if sterr != nil {
		return fmt.Errorf("Error waiting for elasticache (%s) to delete: %s", d.Id(), sterr)
	}

	d.SetId("")

	return nil
}
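
// CacheClusterStateRefreshFunc returns a resource.StateRefreshFunc that polls
// DescribeCacheClusters for the given cluster ID. A "not found" error is treated
// as successful deletion; otherwise the current CacheClusterStatus is reported.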
func CacheClusterStateRefreshFunc(conn *elasticache.ElastiCache, clusterID, givenState string, pending []string) resource.StateRefreshFunc {
	return func() (interface{}, string, error) {
		resp, err := conn.DescribeCacheClusters(&elasticache.DescribeCacheClustersInput{
			CacheClusterID: aws.String(clusterID),
		})
		if err != nil {
			apierr, ok := err.(aws.APIError)
			if !ok {
				// Not an API error (e.g. a transport failure); surface it as-is.
				log.Printf("[ERROR] CacheClusterStateRefreshFunc: %s", err)
				return nil, "", err
			}
			log.Printf("[DEBUG] message: %v, code: %v", apierr.Message, apierr.Code)
			if apierr.Message == fmt.Sprintf("CacheCluster not found: %v", clusterID) {
				log.Printf("[DEBUG] Detected deletion")
				return nil, "", nil
			}

			log.Printf("[ERROR] CacheClusterStateRefreshFunc: %s", err)
			return nil, "", err
		}

		c := resp.CacheClusters[0]
		log.Printf("[DEBUG] status: %v", *c.CacheClusterStatus)

		// Return the current state if it is one of the pending states.
		for _, p := range pending {
			s := *c.CacheClusterStatus
			if p == s {
				log.Printf("[DEBUG] Return with status: %v", *c.CacheClusterStatus)
				return c, p, nil
			}
		}

		// Otherwise report the given (target) state, if one was supplied.
		if givenState != "" {
			return c, givenState, nil
		}
		log.Printf("[DEBUG] current status: %v", *c.CacheClusterStatus)
		return c, *c.CacheClusterStatus, nil
	}
}