go fmt after rebase

parent 7abe2a10e7
commit 71b1cb1289
@@ -1,347 +1,347 @@
package aws

import (
	"fmt"
	"log"
	"regexp"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/rds"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"
)

func resourceAwsRDSCluster() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsRDSClusterCreate,
		Read:   resourceAwsRDSClusterRead,
		Update: resourceAwsRDSClusterUpdate,
		Delete: resourceAwsRDSClusterDelete,

		Schema: map[string]*schema.Schema{

			"availability_zones": &schema.Schema{
				Type:     schema.TypeSet,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Optional: true,
				ForceNew: true,
				Computed: true,
				Set:      schema.HashString,
			},

			"cluster_identifier": &schema.Schema{
				Type:         schema.TypeString,
				Required:     true,
				ForceNew:     true,
				ValidateFunc: validateRdsId,
			},

			"cluster_members": &schema.Schema{
				Type:     schema.TypeSet,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Optional: true,
				Computed: true,
				Set:      schema.HashString,
			},

			"database_name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},

			"db_subnet_group_name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Computed: true,
			},

			"endpoint": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"engine": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"final_snapshot_identifier": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
					value := v.(string)
					if !regexp.MustCompile(`^[0-9A-Za-z-]+$`).MatchString(value) {
						es = append(es, fmt.Errorf(
							"only alphanumeric characters and hyphens allowed in %q", k))
					}
					if regexp.MustCompile(`--`).MatchString(value) {
						es = append(es, fmt.Errorf("%q cannot contain two consecutive hyphens", k))
					}
					if regexp.MustCompile(`-$`).MatchString(value) {
						es = append(es, fmt.Errorf("%q cannot end in a hyphen", k))
					}
					return
				},
			},

			"master_username": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"master_password": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},

			"port": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
				Computed: true,
			},

			// apply_immediately is used to determine when the update modifications
			// take place.
			// See http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html
			"apply_immediately": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				Computed: true,
			},

			"vpc_security_group_ids": &schema.Schema{
				Type:     schema.TypeSet,
				Optional: true,
				Computed: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Set:      schema.HashString,
			},
		},
	}
}

func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).rdsconn

	createOpts := &rds.CreateDBClusterInput{
		DBClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)),
		Engine:              aws.String("aurora"),
		MasterUserPassword:  aws.String(d.Get("master_password").(string)),
		MasterUsername:      aws.String(d.Get("master_username").(string)),
	}

	if v := d.Get("database_name"); v.(string) != "" {
		createOpts.DatabaseName = aws.String(v.(string))
	}

	if attr, ok := d.GetOk("port"); ok {
		createOpts.Port = aws.Int64(int64(attr.(int)))
	}

	if attr, ok := d.GetOk("db_subnet_group_name"); ok {
		createOpts.DBSubnetGroupName = aws.String(attr.(string))
	}

	if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 {
		createOpts.VpcSecurityGroupIds = expandStringList(attr.List())
	}

	if attr := d.Get("availability_zones").(*schema.Set); attr.Len() > 0 {
		createOpts.AvailabilityZones = expandStringList(attr.List())
	}

	log.Printf("[DEBUG] RDS Cluster create options: %s", createOpts)
	resp, err := conn.CreateDBCluster(createOpts)
	if err != nil {
		log.Printf("[ERROR] Error creating RDS Cluster: %s", err)
		return err
	}

	log.Printf("[DEBUG]: Cluster create response: %s", resp)
	d.SetId(*resp.DBCluster.DBClusterIdentifier)
	stateConf := &resource.StateChangeConf{
		Pending:    []string{"creating", "backing-up", "modifying"},
		Target:     "available",
		Refresh:    resourceAwsRDSClusterStateRefreshFunc(d, meta),
		Timeout:    5 * time.Minute,
		MinTimeout: 3 * time.Second,
	}

	// Wait, catching any errors
	_, err = stateConf.WaitForState()
	if err != nil {
		return fmt.Errorf("[WARN] Error waiting for RDS Cluster state to be \"available\": %s", err)
	}

	return resourceAwsRDSClusterRead(d, meta)
}

func resourceAwsRDSClusterRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).rdsconn

	resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{
		DBClusterIdentifier: aws.String(d.Id()),
	})

	if err != nil {
		if awsErr, ok := err.(awserr.Error); ok {
			if "DBClusterNotFoundFault" == awsErr.Code() {
				d.SetId("")
				log.Printf("[DEBUG] RDS Cluster (%s) not found", d.Id())
				return nil
			}
		}
		log.Printf("[DEBUG] Error describing RDS Cluster (%s)", d.Id())
		return err
	}

	var dbc *rds.DBCluster
	for _, c := range resp.DBClusters {
		if *c.DBClusterIdentifier == d.Id() {
			dbc = c
		}
	}

	if dbc == nil {
		log.Printf("[WARN] RDS Cluster (%s) not found", d.Id())
		d.SetId("")
		return nil
	}

	if err := d.Set("availability_zones", aws.StringValueSlice(dbc.AvailabilityZones)); err != nil {
		return fmt.Errorf("[DEBUG] Error saving AvailabilityZones to state for RDS Cluster (%s): %s", d.Id(), err)
	}
	d.Set("database_name", dbc.DatabaseName)
	d.Set("db_subnet_group_name", dbc.DBSubnetGroup)
	d.Set("endpoint", dbc.Endpoint)
	d.Set("engine", dbc.Engine)
	d.Set("master_username", dbc.MasterUsername)
	d.Set("port", dbc.Port)

	var vpcg []string
	for _, g := range dbc.VpcSecurityGroups {
		vpcg = append(vpcg, *g.VpcSecurityGroupId)
	}
	if err := d.Set("vpc_security_group_ids", vpcg); err != nil {
		return fmt.Errorf("[DEBUG] Error saving VPC Security Group IDs to state for RDS Cluster (%s): %s", d.Id(), err)
	}

	var cm []string
	for _, m := range dbc.DBClusterMembers {
		cm = append(cm, *m.DBInstanceIdentifier)
	}
	if err := d.Set("cluster_members", cm); err != nil {
		return fmt.Errorf("[DEBUG] Error saving RDS Cluster Members to state for RDS Cluster (%s): %s", d.Id(), err)
	}

	return nil
}

func resourceAwsRDSClusterUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).rdsconn

	req := &rds.ModifyDBClusterInput{
		ApplyImmediately:    aws.Bool(d.Get("apply_immediately").(bool)),
		DBClusterIdentifier: aws.String(d.Id()),
	}

	if d.HasChange("master_password") {
		req.MasterUserPassword = aws.String(d.Get("master_password").(string))
	}

	if d.HasChange("vpc_security_group_ids") {
		if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 {
			req.VpcSecurityGroupIds = expandStringList(attr.List())
		} else {
			req.VpcSecurityGroupIds = []*string{}
		}
	}

	_, err := conn.ModifyDBCluster(req)
	if err != nil {
		return fmt.Errorf("[WARN] Error modifying RDS Cluster (%s): %s", d.Id(), err)
	}

	return resourceAwsRDSClusterRead(d, meta)
}

func resourceAwsRDSClusterDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).rdsconn
	log.Printf("[DEBUG] Destroying RDS Cluster (%s)", d.Id())

	deleteOpts := rds.DeleteDBClusterInput{
		DBClusterIdentifier: aws.String(d.Id()),
	}

	finalSnapshot := d.Get("final_snapshot_identifier").(string)
	if finalSnapshot == "" {
		deleteOpts.SkipFinalSnapshot = aws.Bool(true)
	} else {
		deleteOpts.FinalDBSnapshotIdentifier = aws.String(finalSnapshot)
		deleteOpts.SkipFinalSnapshot = aws.Bool(false)
	}

	log.Printf("[DEBUG] RDS Cluster delete options: %s", deleteOpts)
	_, err := conn.DeleteDBCluster(&deleteOpts)

	stateConf := &resource.StateChangeConf{
		Pending:    []string{"deleting", "backing-up", "modifying"},
		Target:     "destroyed",
		Refresh:    resourceAwsRDSClusterStateRefreshFunc(d, meta),
		Timeout:    5 * time.Minute,
		MinTimeout: 3 * time.Second,
	}

	// Wait, catching any errors
	_, err = stateConf.WaitForState()
	if err != nil {
		return fmt.Errorf("[WARN] Error deleting RDS Cluster (%s): %s", d.Id(), err)
	}

	return nil
}

func resourceAwsRDSClusterStateRefreshFunc(
	d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc {
	return func() (interface{}, string, error) {
		conn := meta.(*AWSClient).rdsconn

		resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{
			DBClusterIdentifier: aws.String(d.Id()),
		})

		if err != nil {
			if awsErr, ok := err.(awserr.Error); ok {
				if "DBClusterNotFoundFault" == awsErr.Code() {
					return 42, "destroyed", nil
				}
			}
			log.Printf("[WARN] Error on retrieving DB Cluster (%s) when waiting: %s", d.Id(), err)
			return nil, "", err
		}

		var dbc *rds.DBCluster

		for _, c := range resp.DBClusters {
			if *c.DBClusterIdentifier == d.Id() {
				dbc = c
			}
		}

		if dbc == nil {
			return 42, "destroyed", nil
		}

		if dbc.Status != nil {
			log.Printf("[DEBUG] DB Cluster status (%s): %s", d.Id(), *dbc.Status)
		}

		return dbc, *dbc.Status, nil
	}
}
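
A note on the final_snapshot_identifier validation above: the closure rejects anything other than alphanumerics and hyphens, any run of two consecutive hyphens, and a trailing hyphen. The following is a minimal, hypothetical sketch (not part of this commit) showing how that closure could be exercised directly from the schema; the example function name and the sample identifiers are illustrative only.

package aws

import "fmt"

// ExampleFinalSnapshotIdentifierValidation is a hypothetical helper, not part
// of this commit: it pulls the validation closure off the schema defined above
// and runs it against a few sample identifiers.
func ExampleFinalSnapshotIdentifierValidation() {
	validate := resourceAwsRDSCluster().Schema["final_snapshot_identifier"].ValidateFunc

	for _, id := range []string{"my-final-snapshot", "bad--identifier", "trailing-"} {
		_, errs := validate(id, "final_snapshot_identifier")
		fmt.Printf("%q -> %d validation error(s)\n", id, len(errs))
	}
	// "my-final-snapshot" passes; the other two each produce one error.
}
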
@@ -1,220 +1,220 @@
package aws

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/rds"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"
)

func resourceAwsRDSClusterInstance() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsRDSClusterInstanceCreate,
		Read:   resourceAwsRDSClusterInstanceRead,
		Update: resourceAwsRDSClusterInstanceUpdate,
		Delete: resourceAwsRDSClusterInstanceDelete,

		Schema: map[string]*schema.Schema{
			"identifier": &schema.Schema{
				Type:         schema.TypeString,
				Optional:     true,
				ForceNew:     true,
				ValidateFunc: validateRdsId,
			},

			"db_subnet_group_name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Computed: true,
			},

			"writer": &schema.Schema{
				Type:     schema.TypeBool,
				Computed: true,
			},

			"cluster_identifier": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"endpoint": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"port": &schema.Schema{
				Type:     schema.TypeInt,
				Computed: true,
			},

			"publicly_accessible": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				Default:  false,
				ForceNew: true,
			},

			"instance_class": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"tags": tagsSchema(),
		},
	}
}

func resourceAwsRDSClusterInstanceCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).rdsconn
	tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{}))

	createOpts := &rds.CreateDBInstanceInput{
		DBInstanceClass:     aws.String(d.Get("instance_class").(string)),
		DBClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)),
		Engine:              aws.String("aurora"),
		PubliclyAccessible:  aws.Bool(d.Get("publicly_accessible").(bool)),
		Tags:                tags,
	}

	if v := d.Get("identifier").(string); v != "" {
		createOpts.DBInstanceIdentifier = aws.String(v)
	} else {
		createOpts.DBInstanceIdentifier = aws.String(resource.UniqueId())
	}

	if attr, ok := d.GetOk("db_subnet_group_name"); ok {
		createOpts.DBSubnetGroupName = aws.String(attr.(string))
	}

	log.Printf("[DEBUG] Creating RDS DB Instance opts: %s", createOpts)
	resp, err := conn.CreateDBInstance(createOpts)
	if err != nil {
		return err
	}

	d.SetId(*resp.DBInstance.DBInstanceIdentifier)

	// reuse db_instance refresh func
	stateConf := &resource.StateChangeConf{
		Pending:    []string{"creating", "backing-up", "modifying"},
		Target:     "available",
		Refresh:    resourceAwsDbInstanceStateRefreshFunc(d, meta),
		Timeout:    40 * time.Minute,
		MinTimeout: 10 * time.Second,
		Delay:      10 * time.Second,
	}

	// Wait, catching any errors
	_, err = stateConf.WaitForState()
	if err != nil {
		return err
	}

	return resourceAwsRDSClusterInstanceRead(d, meta)
}

func resourceAwsRDSClusterInstanceRead(d *schema.ResourceData, meta interface{}) error {
	db, err := resourceAwsDbInstanceRetrieve(d, meta)
	if err != nil {
		log.Printf("[WARN] Error on retrieving RDS Cluster Instance (%s): %s", d.Id(), err)
		d.SetId("")
		return nil
	}

	// Retrieve DB Cluster information, to determine if this Instance is a writer
	conn := meta.(*AWSClient).rdsconn
	resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{
		DBClusterIdentifier: db.DBClusterIdentifier,
	})

	var dbc *rds.DBCluster
	for _, c := range resp.DBClusters {
		if *c.DBClusterIdentifier == *db.DBClusterIdentifier {
			dbc = c
		}
	}

	if dbc == nil {
		return fmt.Errorf("[WARN] Error finding RDS Cluster (%s) for Cluster Instance (%s): %s",
			*db.DBClusterIdentifier, *db.DBInstanceIdentifier, err)
	}

	for _, m := range dbc.DBClusterMembers {
		if *db.DBInstanceIdentifier == *m.DBInstanceIdentifier {
			if *m.IsClusterWriter == true {
				d.Set("writer", true)
			} else {
				d.Set("writer", false)
			}
		}
	}

	if db.Endpoint != nil {
		d.Set("endpoint", db.Endpoint.Address)
		d.Set("port", db.Endpoint.Port)
	}

	d.Set("publicly_accessible", db.PubliclyAccessible)

	// Fetch and save tags
	arn, err := buildRDSARN(d, meta)
	if err != nil {
		log.Printf("[DEBUG] Error building ARN for RDS Cluster Instance (%s), not setting Tags", *db.DBInstanceIdentifier)
	} else {
		if err := saveTagsRDS(conn, d, arn); err != nil {
			log.Printf("[WARN] Failed to save tags for RDS Cluster Instance (%s): %s", *db.DBClusterIdentifier, err)
		}
	}

	return nil
}

func resourceAwsRDSClusterInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).rdsconn

	if arn, err := buildRDSARN(d, meta); err == nil {
		if err := setTagsRDS(conn, d, arn); err != nil {
			return err
		}
	}

	return resourceAwsRDSClusterInstanceRead(d, meta)
}

func resourceAwsRDSClusterInstanceDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).rdsconn

	log.Printf("[DEBUG] RDS Cluster Instance destroy: %v", d.Id())

	opts := rds.DeleteDBInstanceInput{DBInstanceIdentifier: aws.String(d.Id())}

	log.Printf("[DEBUG] RDS Cluster Instance destroy configuration: %s", opts)
	if _, err := conn.DeleteDBInstance(&opts); err != nil {
		return err
	}

	// re-uses db_instance refresh func
	log.Println("[INFO] Waiting for RDS Cluster Instance to be destroyed")
	stateConf := &resource.StateChangeConf{
		Pending:    []string{"modifying", "deleting"},
		Target:     "",
		Refresh:    resourceAwsDbInstanceStateRefreshFunc(d, meta),
		Timeout:    40 * time.Minute,
		MinTimeout: 10 * time.Second,
	}

	if _, err := stateConf.WaitForState(); err != nil {
		return err
	}

	return nil

}
@@ -1,118 +1,118 @@
package aws

import (
	"fmt"
	"math/rand"
	"strings"
	"testing"
	"time"

	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/rds"
)

func TestAccAWSRDSClusterInstance_basic(t *testing.T) {
	var v rds.DBInstance

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSClusterDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccAWSClusterInstanceConfig,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSClusterInstanceExists("aws_rds_cluster_instance.cluster_instances", &v),
					testAccCheckAWSDBClusterInstanceAttributes(&v),
				),
			},
		},
	})
}

func testAccCheckAWSClusterInstanceDestroy(s *terraform.State) error {
	for _, rs := range s.RootModule().Resources {
		if rs.Type != "aws_rds_cluster" {
			continue
		}

		// Try to find the Group
		conn := testAccProvider.Meta().(*AWSClient).rdsconn
		var err error
		resp, err := conn.DescribeDBInstances(
			&rds.DescribeDBInstancesInput{
				DBInstanceIdentifier: aws.String(rs.Primary.ID),
			})

		if err == nil {
			if len(resp.DBInstances) != 0 &&
				*resp.DBInstances[0].DBInstanceIdentifier == rs.Primary.ID {
				return fmt.Errorf("DB Cluster Instance %s still exists", rs.Primary.ID)
			}
		}

		// Return nil if the Cluster Instance is already destroyed
		if awsErr, ok := err.(awserr.Error); ok {
			if awsErr.Code() == "DBInstanceNotFound" {
				return nil
			}
		}

		return err

	}

	return nil
}

func testAccCheckAWSDBClusterInstanceAttributes(v *rds.DBInstance) resource.TestCheckFunc {
	return func(s *terraform.State) error {

		if *v.Engine != "aurora" {
			return fmt.Errorf("bad engine, expected \"aurora\": %#v", *v.Engine)
		}

		if !strings.HasPrefix(*v.DBClusterIdentifier, "tf-aurora-cluster") {
			return fmt.Errorf("Bad Cluster Identifier prefix:\nexpected: %s\ngot: %s", "tf-aurora-cluster", *v.DBClusterIdentifier)
		}

		return nil
	}
}

func testAccCheckAWSClusterInstanceExists(n string, v *rds.DBInstance) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No DB Instance ID is set")
		}

		conn := testAccProvider.Meta().(*AWSClient).rdsconn
		resp, err := conn.DescribeDBInstances(&rds.DescribeDBInstancesInput{
			DBInstanceIdentifier: aws.String(rs.Primary.ID),
		})

		if err != nil {
			return err
		}

		for _, d := range resp.DBInstances {
			if *d.DBInstanceIdentifier == rs.Primary.ID {
				*v = *d
				return nil
			}
		}

		return fmt.Errorf("DB Cluster (%s) not found", rs.Primary.ID)
	}
}

// Add some randomness to the name, to avoid collisions
@@ -1,100 +1,100 @@
package aws

import (
	"fmt"
	"math/rand"
	"testing"
	"time"

	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/rds"
)

func TestAccAWSRDSCluster_basic(t *testing.T) {
	var v rds.DBCluster

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSClusterDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccAWSClusterConfig,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSClusterExists("aws_rds_cluster.default", &v),
				),
			},
		},
	})
}

func testAccCheckAWSClusterDestroy(s *terraform.State) error {
	for _, rs := range s.RootModule().Resources {
		if rs.Type != "aws_rds_cluster" {
			continue
		}

		// Try to find the Group
		conn := testAccProvider.Meta().(*AWSClient).rdsconn
		var err error
		resp, err := conn.DescribeDBClusters(
			&rds.DescribeDBClustersInput{
				DBClusterIdentifier: aws.String(rs.Primary.ID),
			})

		if err == nil {
			if len(resp.DBClusters) != 0 &&
				*resp.DBClusters[0].DBClusterIdentifier == rs.Primary.ID {
				return fmt.Errorf("DB Cluster %s still exists", rs.Primary.ID)
			}
		}

		// Return nil if the cluster is already destroyed
		if awsErr, ok := err.(awserr.Error); ok {
			if awsErr.Code() == "DBClusterNotFound" {
				return nil
			}
		}

		return err
	}

	return nil
}

func testAccCheckAWSClusterExists(n string, v *rds.DBCluster) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No DB Instance ID is set")
		}

		conn := testAccProvider.Meta().(*AWSClient).rdsconn
		resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{
			DBClusterIdentifier: aws.String(rs.Primary.ID),
		})

		if err != nil {
			return err
		}

		for _, c := range resp.DBClusters {
			if *c.DBClusterIdentifier == rs.Primary.ID {
				*v = *c
				return nil
			}
		}

		return fmt.Errorf("DB Cluster (%s) not found", rs.Primary.ID)
	}
}

// Add some randomness to the name, to avoid collisions
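
Both test files cut off at the comment above, just before their acceptance-test configuration constants, which are not shown in this view. As a purely hypothetical sketch of the technique the comment describes (the helper name and identifier format are assumptions, not the original code), a randomized identifier can be built from the math/rand and time packages the test files already import:

package aws

import (
	"fmt"
	"math/rand"
	"time"
)

// randomClusterIdentifier is a hypothetical helper, not the original test
// config: it builds a randomized cluster identifier so repeated acceptance
// test runs do not collide on the same name.
func randomClusterIdentifier() string {
	rand.Seed(time.Now().UnixNano()) // seed with the current time
	return fmt.Sprintf("tf-aurora-cluster-%d", rand.Int())
}

The "tf-aurora-cluster" prefix matches what testAccCheckAWSDBClusterInstanceAttributes asserts on above; only the random suffix varies between runs.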