go fmt after rebase

Clint Shryock 2015-10-07 11:27:24 -05:00
parent 7abe2a10e7
commit 71b1cb1289
4 changed files with 595 additions and 595 deletions
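Since the additions and deletions balance exactly and the message is "go fmt after rebase", the changes below should be whitespace-only: gofmt re-indents with tabs and aligns the values of adjacent struct-literal fields. A minimal illustration of that kind of normalization (a hypothetical snippet, not taken from this commit):

Before:
  opts := &rds.CreateDBClusterInput{
  DBClusterIdentifier: aws.String(id),
  Engine: aws.String("aurora"),
  }

After gofmt:
	opts := &rds.CreateDBClusterInput{
		DBClusterIdentifier: aws.String(id),
		Engine:              aws.String("aurora"),
	}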

@@ -1,347 +1,347 @@
package aws

import (
	"fmt"
	"log"
	"regexp"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/rds"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"
)

func resourceAwsRDSCluster() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsRDSClusterCreate,
		Read:   resourceAwsRDSClusterRead,
		Update: resourceAwsRDSClusterUpdate,
		Delete: resourceAwsRDSClusterDelete,

		Schema: map[string]*schema.Schema{
			"availability_zones": &schema.Schema{
				Type:     schema.TypeSet,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Optional: true,
				ForceNew: true,
				Computed: true,
				Set:      schema.HashString,
			},

			"cluster_identifier": &schema.Schema{
				Type:         schema.TypeString,
				Required:     true,
				ForceNew:     true,
				ValidateFunc: validateRdsId,
			},

			"cluster_members": &schema.Schema{
				Type:     schema.TypeSet,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Optional: true,
				Computed: true,
				Set:      schema.HashString,
			},

			"database_name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},

			"db_subnet_group_name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Computed: true,
			},

			"endpoint": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"engine": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"final_snapshot_identifier": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
					value := v.(string)
					if !regexp.MustCompile(`^[0-9A-Za-z-]+$`).MatchString(value) {
						es = append(es, fmt.Errorf(
							"only alphanumeric characters and hyphens allowed in %q", k))
					}
					if regexp.MustCompile(`--`).MatchString(value) {
						es = append(es, fmt.Errorf("%q cannot contain two consecutive hyphens", k))
					}
					if regexp.MustCompile(`-$`).MatchString(value) {
						es = append(es, fmt.Errorf("%q cannot end in a hyphen", k))
					}
					return
				},
			},

			"master_username": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"master_password": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},

			"port": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
				Computed: true,
			},

			// apply_immediately is used to determine when the update modifications
			// take place.
			// See http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html
			"apply_immediately": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				Computed: true,
			},

			"vpc_security_group_ids": &schema.Schema{
				Type:     schema.TypeSet,
				Optional: true,
				Computed: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Set:      schema.HashString,
			},
		},
	}
}

func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).rdsconn

	createOpts := &rds.CreateDBClusterInput{
		DBClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)),
		Engine:              aws.String("aurora"),
		MasterUserPassword:  aws.String(d.Get("master_password").(string)),
		MasterUsername:      aws.String(d.Get("master_username").(string)),
	}

	if v := d.Get("database_name"); v.(string) != "" {
		createOpts.DatabaseName = aws.String(v.(string))
	}

	if attr, ok := d.GetOk("port"); ok {
		createOpts.Port = aws.Int64(int64(attr.(int)))
	}

	if attr, ok := d.GetOk("db_subnet_group_name"); ok {
		createOpts.DBSubnetGroupName = aws.String(attr.(string))
	}

	if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 {
		createOpts.VpcSecurityGroupIds = expandStringList(attr.List())
	}

	if attr := d.Get("availability_zones").(*schema.Set); attr.Len() > 0 {
		createOpts.AvailabilityZones = expandStringList(attr.List())
	}

	log.Printf("[DEBUG] RDS Cluster create options: %s", createOpts)
	resp, err := conn.CreateDBCluster(createOpts)
	if err != nil {
		log.Printf("[ERROR] Error creating RDS Cluster: %s", err)
		return err
	}

	log.Printf("[DEBUG]: Cluster create response: %s", resp)
	d.SetId(*resp.DBCluster.DBClusterIdentifier)
	stateConf := &resource.StateChangeConf{
		Pending:    []string{"creating", "backing-up", "modifying"},
		Target:     "available",
		Refresh:    resourceAwsRDSClusterStateRefreshFunc(d, meta),
		Timeout:    5 * time.Minute,
		MinTimeout: 3 * time.Second,
	}

	// Wait, catching any errors
	_, err = stateConf.WaitForState()
	if err != nil {
		return fmt.Errorf("[WARN] Error waiting for RDS Cluster state to be \"available\": %s", err)
	}

	return resourceAwsRDSClusterRead(d, meta)
}

func resourceAwsRDSClusterRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).rdsconn

	resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{
		DBClusterIdentifier: aws.String(d.Id()),
	})

	if err != nil {
		if awsErr, ok := err.(awserr.Error); ok {
			if "DBClusterNotFoundFault" == awsErr.Code() {
				d.SetId("")
				log.Printf("[DEBUG] RDS Cluster (%s) not found", d.Id())
				return nil
			}
		}
		log.Printf("[DEBUG] Error describing RDS Cluster (%s)", d.Id())
		return err
	}

	var dbc *rds.DBCluster
	for _, c := range resp.DBClusters {
		if *c.DBClusterIdentifier == d.Id() {
			dbc = c
		}
	}

	if dbc == nil {
		log.Printf("[WARN] RDS Cluster (%s) not found", d.Id())
		d.SetId("")
		return nil
	}

	if err := d.Set("availability_zones", aws.StringValueSlice(dbc.AvailabilityZones)); err != nil {
		return fmt.Errorf("[DEBUG] Error saving AvailabilityZones to state for RDS Cluster (%s): %s", d.Id(), err)
	}
	d.Set("database_name", dbc.DatabaseName)
	d.Set("db_subnet_group_name", dbc.DBSubnetGroup)
	d.Set("endpoint", dbc.Endpoint)
	d.Set("engine", dbc.Engine)
	d.Set("master_username", dbc.MasterUsername)
	d.Set("port", dbc.Port)

	var vpcg []string
	for _, g := range dbc.VpcSecurityGroups {
		vpcg = append(vpcg, *g.VpcSecurityGroupId)
	}
	if err := d.Set("vpc_security_group_ids", vpcg); err != nil {
		return fmt.Errorf("[DEBUG] Error saving VPC Security Group IDs to state for RDS Cluster (%s): %s", d.Id(), err)
	}

	var cm []string
	for _, m := range dbc.DBClusterMembers {
		cm = append(cm, *m.DBInstanceIdentifier)
	}
	if err := d.Set("cluster_members", cm); err != nil {
		return fmt.Errorf("[DEBUG] Error saving RDS Cluster Members to state for RDS Cluster (%s): %s", d.Id(), err)
	}

	return nil
}

func resourceAwsRDSClusterUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).rdsconn

	req := &rds.ModifyDBClusterInput{
		ApplyImmediately:    aws.Bool(d.Get("apply_immediately").(bool)),
		DBClusterIdentifier: aws.String(d.Id()),
	}

	if d.HasChange("master_password") {
		req.MasterUserPassword = aws.String(d.Get("master_password").(string))
	}

	if d.HasChange("vpc_security_group_ids") {
		if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 {
			req.VpcSecurityGroupIds = expandStringList(attr.List())
		} else {
			req.VpcSecurityGroupIds = []*string{}
		}
	}

	_, err := conn.ModifyDBCluster(req)
	if err != nil {
		return fmt.Errorf("[WARN] Error modifying RDS Cluster (%s): %s", d.Id(), err)
	}

	return resourceAwsRDSClusterRead(d, meta)
}

func resourceAwsRDSClusterDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).rdsconn
	log.Printf("[DEBUG] Destroying RDS Cluster (%s)", d.Id())

	deleteOpts := rds.DeleteDBClusterInput{
		DBClusterIdentifier: aws.String(d.Id()),
	}

	finalSnapshot := d.Get("final_snapshot_identifier").(string)
	if finalSnapshot == "" {
		deleteOpts.SkipFinalSnapshot = aws.Bool(true)
	} else {
		deleteOpts.FinalDBSnapshotIdentifier = aws.String(finalSnapshot)
		deleteOpts.SkipFinalSnapshot = aws.Bool(false)
	}

	log.Printf("[DEBUG] RDS Cluster delete options: %s", deleteOpts)
	_, err := conn.DeleteDBCluster(&deleteOpts)

	stateConf := &resource.StateChangeConf{
		Pending:    []string{"deleting", "backing-up", "modifying"},
		Target:     "destroyed",
		Refresh:    resourceAwsRDSClusterStateRefreshFunc(d, meta),
		Timeout:    5 * time.Minute,
		MinTimeout: 3 * time.Second,
	}

	// Wait, catching any errors
	_, err = stateConf.WaitForState()
	if err != nil {
		return fmt.Errorf("[WARN] Error deleting RDS Cluster (%s): %s", d.Id(), err)
	}

	return nil
}

func resourceAwsRDSClusterStateRefreshFunc(
	d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc {
	return func() (interface{}, string, error) {
		conn := meta.(*AWSClient).rdsconn

		resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{
			DBClusterIdentifier: aws.String(d.Id()),
		})

		if err != nil {
			if awsErr, ok := err.(awserr.Error); ok {
				if "DBClusterNotFoundFault" == awsErr.Code() {
					return 42, "destroyed", nil
				}
			}
			log.Printf("[WARN] Error on retrieving DB Cluster (%s) when waiting: %s", d.Id(), err)
			return nil, "", err
		}

		var dbc *rds.DBCluster

		for _, c := range resp.DBClusters {
			if *c.DBClusterIdentifier == d.Id() {
				dbc = c
			}
		}

		if dbc == nil {
			return 42, "destroyed", nil
		}

		if dbc.Status != nil {
			log.Printf("[DEBUG] DB Cluster status (%s): %s", d.Id(), *dbc.Status)
		}

		return dbc, *dbc.Status, nil
	}
}
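The read function and the state-refresh function above both treat the DBClusterNotFoundFault error code as "the cluster is gone" by type-asserting the error to awserr.Error. A minimal, self-contained sketch of that classification pattern (the isErrCode helper is illustrative, not part of the provider):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

// isErrCode reports whether err is an AWS API error with the given code,
// e.g. "DBClusterNotFoundFault", mirroring the type assertion used above.
func isErrCode(err error, code string) bool {
	if awsErr, ok := err.(awserr.Error); ok {
		return awsErr.Code() == code
	}
	return false
}

func main() {
	err := awserr.New("DBClusterNotFoundFault", "DBCluster test not found", nil)
	fmt.Println(isErrCode(err, "DBClusterNotFoundFault")) // true
}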

@@ -1,220 +1,220 @@
package aws

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/rds"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"
)

func resourceAwsRDSClusterInstance() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsRDSClusterInstanceCreate,
		Read:   resourceAwsRDSClusterInstanceRead,
		Update: resourceAwsRDSClusterInstanceUpdate,
		Delete: resourceAwsRDSClusterInstanceDelete,

		Schema: map[string]*schema.Schema{
			"identifier": &schema.Schema{
				Type:         schema.TypeString,
				Optional:     true,
				ForceNew:     true,
				ValidateFunc: validateRdsId,
			},

			"db_subnet_group_name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Computed: true,
			},

			"writer": &schema.Schema{
				Type:     schema.TypeBool,
				Computed: true,
			},

			"cluster_identifier": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"endpoint": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"port": &schema.Schema{
				Type:     schema.TypeInt,
				Computed: true,
			},

			"publicly_accessible": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				Default:  false,
				ForceNew: true,
			},

			"instance_class": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"tags": tagsSchema(),
		},
	}
}

func resourceAwsRDSClusterInstanceCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).rdsconn
	tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{}))

	createOpts := &rds.CreateDBInstanceInput{
		DBInstanceClass:     aws.String(d.Get("instance_class").(string)),
		DBClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)),
		Engine:              aws.String("aurora"),
		PubliclyAccessible:  aws.Bool(d.Get("publicly_accessible").(bool)),
		Tags:                tags,
	}

	if v := d.Get("identifier").(string); v != "" {
		createOpts.DBInstanceIdentifier = aws.String(v)
	} else {
		createOpts.DBInstanceIdentifier = aws.String(resource.UniqueId())
	}

	if attr, ok := d.GetOk("db_subnet_group_name"); ok {
		createOpts.DBSubnetGroupName = aws.String(attr.(string))
	}

	log.Printf("[DEBUG] Creating RDS DB Instance opts: %s", createOpts)
	resp, err := conn.CreateDBInstance(createOpts)
	if err != nil {
		return err
	}

	d.SetId(*resp.DBInstance.DBInstanceIdentifier)

	// reuse db_instance refresh func
	stateConf := &resource.StateChangeConf{
		Pending:    []string{"creating", "backing-up", "modifying"},
		Target:     "available",
		Refresh:    resourceAwsDbInstanceStateRefreshFunc(d, meta),
		Timeout:    40 * time.Minute,
		MinTimeout: 10 * time.Second,
		Delay:      10 * time.Second,
	}

	// Wait, catching any errors
	_, err = stateConf.WaitForState()
	if err != nil {
		return err
	}

	return resourceAwsRDSClusterInstanceRead(d, meta)
}

func resourceAwsRDSClusterInstanceRead(d *schema.ResourceData, meta interface{}) error {
	db, err := resourceAwsDbInstanceRetrieve(d, meta)
	if err != nil {
		log.Printf("[WARN] Error on retrieving RDS Cluster Instance (%s): %s", d.Id(), err)
		d.SetId("")
		return nil
	}

	// Retrieve DB Cluster information, to determine if this Instance is a writer
	conn := meta.(*AWSClient).rdsconn
	resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{
		DBClusterIdentifier: db.DBClusterIdentifier,
	})

	var dbc *rds.DBCluster
	for _, c := range resp.DBClusters {
		if *c.DBClusterIdentifier == *db.DBClusterIdentifier {
			dbc = c
		}
	}

	if dbc == nil {
		return fmt.Errorf("[WARN] Error finding RDS Cluster (%s) for Cluster Instance (%s): %s",
			*db.DBClusterIdentifier, *db.DBInstanceIdentifier, err)
	}

	for _, m := range dbc.DBClusterMembers {
		if *db.DBInstanceIdentifier == *m.DBInstanceIdentifier {
			if *m.IsClusterWriter == true {
				d.Set("writer", true)
			} else {
				d.Set("writer", false)
			}
		}
	}

	if db.Endpoint != nil {
		d.Set("endpoint", db.Endpoint.Address)
		d.Set("port", db.Endpoint.Port)
	}

	d.Set("publicly_accessible", db.PubliclyAccessible)

	// Fetch and save tags
	arn, err := buildRDSARN(d, meta)
	if err != nil {
		log.Printf("[DEBUG] Error building ARN for RDS Cluster Instance (%s), not setting Tags", *db.DBInstanceIdentifier)
	} else {
		if err := saveTagsRDS(conn, d, arn); err != nil {
			log.Printf("[WARN] Failed to save tags for RDS Cluster Instance (%s): %s", *db.DBClusterIdentifier, err)
		}
	}

	return nil
}

func resourceAwsRDSClusterInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).rdsconn

	if arn, err := buildRDSARN(d, meta); err == nil {
		if err := setTagsRDS(conn, d, arn); err != nil {
			return err
		}
	}

	return resourceAwsRDSClusterInstanceRead(d, meta)
}

func resourceAwsRDSClusterInstanceDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).rdsconn

	log.Printf("[DEBUG] RDS Cluster Instance destroy: %v", d.Id())

	opts := rds.DeleteDBInstanceInput{DBInstanceIdentifier: aws.String(d.Id())}

	log.Printf("[DEBUG] RDS Cluster Instance destroy configuration: %s", opts)
	if _, err := conn.DeleteDBInstance(&opts); err != nil {
		return err
	}

	// re-uses db_instance refresh func
	log.Println("[INFO] Waiting for RDS Cluster Instance to be destroyed")
	stateConf := &resource.StateChangeConf{
		Pending:    []string{"modifying", "deleting"},
		Target:     "",
		Refresh:    resourceAwsDbInstanceStateRefreshFunc(d, meta),
		Timeout:    40 * time.Minute,
		MinTimeout: 10 * time.Second,
	}

	if _, err := stateConf.WaitForState(); err != nil {
		return err
	}

	return nil
}
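Create and delete in both resources block on resource.StateChangeConf until the cluster or instance reaches the target state. A stripped-down, dependency-free analogue of that polling loop, to show the mechanics WaitForState relies on (an illustrative sketch, not the helper's actual implementation):

package main

import (
	"errors"
	"fmt"
	"time"
)

// refreshFunc mirrors the shape of the state-refresh closures above:
// current object, its state string, and any error.
type refreshFunc func() (interface{}, string, error)

// waitForState keeps calling refresh until the target state is reached
// or the timeout expires, the core behaviour the resources above depend on.
func waitForState(refresh refreshFunc, target string, timeout, interval time.Duration) (interface{}, error) {
	deadline := time.Now().Add(timeout)
	for {
		obj, state, err := refresh()
		if err != nil {
			return nil, err
		}
		if state == target {
			return obj, nil
		}
		if time.Now().After(deadline) {
			return nil, errors.New("timeout waiting for state " + target)
		}
		time.Sleep(interval)
	}
}

func main() {
	states := []string{"creating", "backing-up", "available"}
	i := 0
	refresh := func() (interface{}, string, error) {
		s := states[i]
		if i < len(states)-1 {
			i++
		}
		return s, s, nil
	}
	obj, err := waitForState(refresh, "available", time.Second, 10*time.Millisecond)
	fmt.Println(obj, err) // available <nil>
}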

@@ -1,118 +1,118 @@
package aws

import (
	"fmt"
	"math/rand"
	"strings"
	"testing"
	"time"

	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/rds"
)

func TestAccAWSRDSClusterInstance_basic(t *testing.T) {
	var v rds.DBInstance

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSClusterDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccAWSClusterInstanceConfig,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSClusterInstanceExists("aws_rds_cluster_instance.cluster_instances", &v),
					testAccCheckAWSDBClusterInstanceAttributes(&v),
				),
			},
		},
	})
}

func testAccCheckAWSClusterInstanceDestroy(s *terraform.State) error {
	for _, rs := range s.RootModule().Resources {
		if rs.Type != "aws_rds_cluster" {
			continue
		}

		// Try to find the Group
		conn := testAccProvider.Meta().(*AWSClient).rdsconn
		var err error
		resp, err := conn.DescribeDBInstances(
			&rds.DescribeDBInstancesInput{
				DBInstanceIdentifier: aws.String(rs.Primary.ID),
			})

		if err == nil {
			if len(resp.DBInstances) != 0 &&
				*resp.DBInstances[0].DBInstanceIdentifier == rs.Primary.ID {
				return fmt.Errorf("DB Cluster Instance %s still exists", rs.Primary.ID)
			}
		}

		// Return nil if the Cluster Instance is already destroyed
		if awsErr, ok := err.(awserr.Error); ok {
			if awsErr.Code() == "DBInstanceNotFound" {
				return nil
			}
		}

		return err
	}

	return nil
}

func testAccCheckAWSDBClusterInstanceAttributes(v *rds.DBInstance) resource.TestCheckFunc {
	return func(s *terraform.State) error {

		if *v.Engine != "aurora" {
			return fmt.Errorf("bad engine, expected \"aurora\": %#v", *v.Engine)
		}

		if !strings.HasPrefix(*v.DBClusterIdentifier, "tf-aurora-cluster") {
			return fmt.Errorf("Bad Cluster Identifier prefix:\nexpected: %s\ngot: %s", "tf-aurora-cluster", *v.DBClusterIdentifier)
		}

		return nil
	}
}

func testAccCheckAWSClusterInstanceExists(n string, v *rds.DBInstance) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No DB Instance ID is set")
		}

		conn := testAccProvider.Meta().(*AWSClient).rdsconn
		resp, err := conn.DescribeDBInstances(&rds.DescribeDBInstancesInput{
			DBInstanceIdentifier: aws.String(rs.Primary.ID),
		})

		if err != nil {
			return err
		}

		for _, d := range resp.DBInstances {
			if *d.DBInstanceIdentifier == rs.Primary.ID {
				*v = *d
				return nil
			}
		}

		return fmt.Errorf("DB Cluster (%s) not found", rs.Primary.ID)
	}
}

// Add some random to the name, to avoid collision
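The acceptance test above chains its assertions with resource.ComposeTestCheckFunc. A dependency-free sketch of that composition pattern, assuming the helper's run-in-order, stop-on-first-error behaviour (illustrative, not the real helper):

package main

import (
	"errors"
	"fmt"
)

// checkFunc mirrors the shape of the TestCheckFunc closures above,
// minus the *terraform.State parameter.
type checkFunc func() error

// composeChecks runs each check in order and stops at the first failure.
func composeChecks(checks ...checkFunc) checkFunc {
	return func() error {
		for i, c := range checks {
			if err := c(); err != nil {
				return fmt.Errorf("check %d failed: %s", i, err)
			}
		}
		return nil
	}
}

func main() {
	ok := func() error { return nil }
	bad := func() error { return errors.New("boom") }
	fmt.Println(composeChecks(ok, ok)())  // <nil>
	fmt.Println(composeChecks(ok, bad)()) // check 1 failed: boom
}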

@@ -1,100 +1,100 @@
package aws

import (
	"fmt"
	"math/rand"
	"testing"
	"time"

	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/rds"
)

func TestAccAWSRDSCluster_basic(t *testing.T) {
	var v rds.DBCluster

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSClusterDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccAWSClusterConfig,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSClusterExists("aws_rds_cluster.default", &v),
				),
			},
		},
	})
}

func testAccCheckAWSClusterDestroy(s *terraform.State) error {
	for _, rs := range s.RootModule().Resources {
		if rs.Type != "aws_rds_cluster" {
			continue
		}

		// Try to find the Group
		conn := testAccProvider.Meta().(*AWSClient).rdsconn
		var err error
		resp, err := conn.DescribeDBClusters(
			&rds.DescribeDBClustersInput{
				DBClusterIdentifier: aws.String(rs.Primary.ID),
			})

		if err == nil {
			if len(resp.DBClusters) != 0 &&
				*resp.DBClusters[0].DBClusterIdentifier == rs.Primary.ID {
				return fmt.Errorf("DB Cluster %s still exists", rs.Primary.ID)
			}
		}

		// Return nil if the cluster is already destroyed
		if awsErr, ok := err.(awserr.Error); ok {
			if awsErr.Code() == "DBClusterNotFound" {
				return nil
			}
		}

		return err
	}

	return nil
}

func testAccCheckAWSClusterExists(n string, v *rds.DBCluster) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No DB Instance ID is set")
		}

		conn := testAccProvider.Meta().(*AWSClient).rdsconn
		resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{
			DBClusterIdentifier: aws.String(rs.Primary.ID),
		})

		if err != nil {
			return err
		}

		for _, c := range resp.DBClusters {
			if *c.DBClusterIdentifier == rs.Primary.ID {
				*v = *c
				return nil
			}
		}

		return fmt.Errorf("DB Cluster (%s) not found", rs.Primary.ID)
	}
}

// Add some random to the name, to avoid collision
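The randomized test configuration this comment introduces is truncated in this view. A hypothetical sketch of the idiom the comment (and the math/rand and time imports) point at, not the actual test config:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	// Seed and generate a random suffix so concurrently-run acceptance
	// tests do not collide on the cluster identifier.
	rand.Seed(time.Now().UnixNano())
	name := fmt.Sprintf("tf-aurora-cluster-%d", rand.Int31())
	fmt.Println(name)
}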