provider/aws: Support tags for AWS redshift cluster (#5356)
commit 1df8290134
parent ca48196f5f
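With this change, tags can be managed on aws_redshift_cluster through a standard tags argument. A minimal configuration sketch (the resource name and all values below are illustrative, not taken from the commit):

resource "aws_redshift_cluster" "example" {
  cluster_identifier                  = "tf-redshift-cluster"
  database_name                       = "mydb"
  master_username                     = "foo"
  master_password                     = "Mustbe8characters"
  node_type                           = "dc1.large"
  automated_snapshot_retention_period = 7

  # Tags are a plain map; see the schema and test changes below.
  tags {
    environment = "Production"
    cluster     = "reader"
  }
}

On create the map is sent as Tags on CreateClusterInput, on read it is written back to state with tagsToMapRedshift, and on update the old and new maps are diffed and applied with CreateTags/DeleteTags against the cluster ARN, as the hunks below show.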
@@ -195,6 +195,8 @@ func resourceAwsRedshiftCluster() *schema.Resource {
 				Optional: true,
 				Computed: true,
 			},
+
+			"tags": tagsSchema(),
 		},
 	}
 }
@@ -203,6 +205,7 @@ func resourceAwsRedshiftClusterCreate(d *schema.ResourceData, meta interface{})
 	conn := meta.(*AWSClient).redshiftconn

 	log.Printf("[INFO] Building Redshift Cluster Options")
+	tags := tagsFromMapRedshift(d.Get("tags").(map[string]interface{}))
 	createOpts := &redshift.CreateClusterInput{
 		ClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)),
 		Port:              aws.Int64(int64(d.Get("port").(int))),
@@ -214,6 +217,7 @@ func resourceAwsRedshiftClusterCreate(d *schema.ResourceData, meta interface{})
 		AllowVersionUpgrade:              aws.Bool(d.Get("allow_version_upgrade").(bool)),
 		PubliclyAccessible:               aws.Bool(d.Get("publicly_accessible").(bool)),
 		AutomatedSnapshotRetentionPeriod: aws.Int64(int64(d.Get("automated_snapshot_retention_period").(int))),
+		Tags: tags,
 	}

 	if v := d.Get("number_of_nodes").(int); v > 1 {
@@ -357,13 +361,27 @@ func resourceAwsRedshiftClusterRead(d *schema.ResourceData, meta interface{}) er

 	d.Set("cluster_public_key", rsc.ClusterPublicKey)
 	d.Set("cluster_revision_number", rsc.ClusterRevisionNumber)
+	d.Set("tags", tagsToMapRedshift(rsc.Tags))

 	return nil
 }

 func resourceAwsRedshiftClusterUpdate(d *schema.ResourceData, meta interface{}) error {
 	conn := meta.(*AWSClient).redshiftconn
+	d.Partial(true)

+	arn, tagErr := buildRedshiftARN(d.Id(), meta.(*AWSClient).accountid, meta.(*AWSClient).region)
+	if tagErr != nil {
+		return fmt.Errorf("Error building ARN for Redshift Cluster, not updating Tags for cluster %s", d.Id())
+	} else {
+		if tagErr := setTagsRedshift(conn, d, arn); tagErr != nil {
+			return tagErr
+		} else {
+			d.SetPartial("tags")
+		}
+	}
+
+	requestUpdate := false
 	log.Printf("[INFO] Building Redshift Modify Cluster Options")
 	req := &redshift.ModifyClusterInput{
 		ClusterIdentifier: aws.String(d.Id()),
@@ -371,10 +389,12 @@ func resourceAwsRedshiftClusterUpdate(d *schema.ResourceData, meta interface{})

 	if d.HasChange("cluster_type") {
 		req.ClusterType = aws.String(d.Get("cluster_type").(string))
+		requestUpdate = true
 	}

 	if d.HasChange("node_type") {
 		req.NodeType = aws.String(d.Get("node_type").(string))
+		requestUpdate = true
 	}

 	if d.HasChange("number_of_nodes") {
@@ -384,65 +404,80 @@ func resourceAwsRedshiftClusterUpdate(d *schema.ResourceData, meta interface{})
 		} else {
 			req.ClusterType = aws.String("single-node")
 		}

 		req.NodeType = aws.String(d.Get("node_type").(string))
+		requestUpdate = true
 	}

 	if d.HasChange("cluster_security_groups") {
 		req.ClusterSecurityGroups = expandStringList(d.Get("cluster_security_groups").(*schema.Set).List())
+		requestUpdate = true
 	}

 	if d.HasChange("vpc_security_group_ips") {
 		req.VpcSecurityGroupIds = expandStringList(d.Get("vpc_security_group_ips").(*schema.Set).List())
+		requestUpdate = true
 	}

 	if d.HasChange("master_password") {
 		req.MasterUserPassword = aws.String(d.Get("master_password").(string))
+		requestUpdate = true
 	}

 	if d.HasChange("cluster_parameter_group_name") {
 		req.ClusterParameterGroupName = aws.String(d.Get("cluster_parameter_group_name").(string))
+		requestUpdate = true
 	}

 	if d.HasChange("automated_snapshot_retention_period") {
 		req.AutomatedSnapshotRetentionPeriod = aws.Int64(int64(d.Get("automated_snapshot_retention_period").(int)))
+		requestUpdate = true
 	}

 	if d.HasChange("preferred_maintenance_window") {
 		req.PreferredMaintenanceWindow = aws.String(d.Get("preferred_maintenance_window").(string))
+		requestUpdate = true
 	}

 	if d.HasChange("cluster_version") {
 		req.ClusterVersion = aws.String(d.Get("cluster_version").(string))
+		requestUpdate = true
 	}

 	if d.HasChange("allow_version_upgrade") {
 		req.AllowVersionUpgrade = aws.Bool(d.Get("allow_version_upgrade").(bool))
+		requestUpdate = true
 	}

 	if d.HasChange("publicly_accessible") {
 		req.PubliclyAccessible = aws.Bool(d.Get("publicly_accessible").(bool))
+		requestUpdate = true
 	}

-	log.Printf("[INFO] Modifying Redshift Cluster: %s", d.Id())
-	log.Printf("[DEBUG] Redshift Cluster Modify options: %s", req)
-	_, err := conn.ModifyCluster(req)
-	if err != nil {
-		return fmt.Errorf("[WARN] Error modifying Redshift Cluster (%s): %s", d.Id(), err)
-	}
-
-	stateConf := &resource.StateChangeConf{
-		Pending:    []string{"creating", "deleting", "rebooting", "resizing", "renaming", "modifying"},
-		Target:     []string{"available"},
-		Refresh:    resourceAwsRedshiftClusterStateRefreshFunc(d, meta),
-		Timeout:    40 * time.Minute,
-		MinTimeout: 10 * time.Second,
-	}
-
-	// Wait, catching any errors
-	_, err = stateConf.WaitForState()
-	if err != nil {
-		return fmt.Errorf("[WARN] Error Modifying Redshift Cluster (%s): %s", d.Id(), err)
-	}
+	if requestUpdate {
+		log.Printf("[INFO] Modifying Redshift Cluster: %s", d.Id())
+		log.Printf("[DEBUG] Redshift Cluster Modify options: %s", req)
+		_, err := conn.ModifyCluster(req)
+		if err != nil {
+			return fmt.Errorf("[WARN] Error modifying Redshift Cluster (%s): %s", d.Id(), err)
+		}
+
+		stateConf := &resource.StateChangeConf{
+			Pending:    []string{"creating", "deleting", "rebooting", "resizing", "renaming", "modifying"},
+			Target:     []string{"available"},
+			Refresh:    resourceAwsRedshiftClusterStateRefreshFunc(d, meta),
+			Timeout:    40 * time.Minute,
+			MinTimeout: 10 * time.Second,
+		}
+
+		// Wait, catching any errors
+		_, err = stateConf.WaitForState()
+		if err != nil {
+			return fmt.Errorf("[WARN] Error Modifying Redshift Cluster (%s): %s", d.Id(), err)
+		}
+	}
+
+	d.Partial(false)

 	return resourceAwsRedshiftClusterRead(d, meta)
 }
@@ -602,3 +637,12 @@ func validateRedshiftClusterMasterUsername(v interface{}, k string) (ws []string
 	}
 	return
 }
+
+func buildRedshiftARN(identifier, accountid, region string) (string, error) {
+	if accountid == "" {
+		return "", fmt.Errorf("Unable to construct cluster ARN because of missing AWS Account ID")
+	}
+	arn := fmt.Sprintf("arn:aws:redshift:%s:%s:cluster:%s", region, accountid, identifier)
+	return arn, nil
+
+}
@@ -104,6 +104,41 @@ func TestAccAWSRedshiftCluster_updateNodeCount(t *testing.T) {
 	})
 }

+func TestAccAWSRedshiftCluster_tags(t *testing.T) {
+	var v redshift.Cluster
+
+	ri := rand.New(rand.NewSource(time.Now().UnixNano())).Int()
+	preConfig := fmt.Sprintf(testAccAWSRedshiftClusterConfig_tags, ri)
+	postConfig := fmt.Sprintf(testAccAWSRedshiftClusterConfig_updatedTags, ri)
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSRedshiftClusterDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: preConfig,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSRedshiftClusterExists("aws_redshift_cluster.default", &v),
+					resource.TestCheckResourceAttr(
+						"aws_redshift_cluster.default", "tags.#", "3"),
+					resource.TestCheckResourceAttr("aws_redshift_cluster.default", "tags.environment", "Production"),
+				),
+			},
+
+			resource.TestStep{
+				Config: postConfig,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSRedshiftClusterExists("aws_redshift_cluster.default", &v),
+					resource.TestCheckResourceAttr(
+						"aws_redshift_cluster.default", "tags.#", "1"),
+					resource.TestCheckResourceAttr("aws_redshift_cluster.default", "tags.environment", "Production"),
+				),
+			},
+		},
+	})
+}
+
 func testAccCheckAWSRedshiftClusterDestroy(s *terraform.State) error {
 	for _, rs := range s.RootModule().Resources {
 		if rs.Type != "aws_redshift_cluster" {
@@ -306,10 +341,6 @@ func TestResourceAWSRedshiftClusterMasterUsernameValidation(t *testing.T) {
 }

 var testAccAWSRedshiftClusterConfig_updateNodeCount = `
-provider "aws" {
-  region = "us-west-2"
-}
-
 resource "aws_redshift_cluster" "default" {
   cluster_identifier = "tf-redshift-cluster-%d"
   availability_zone = "us-west-2a"
@@ -324,10 +355,6 @@ resource "aws_redshift_cluster" "default" {
 `

 var testAccAWSRedshiftClusterConfig_basic = `
-provider "aws" {
-  region = "us-west-2"
-}
-
 resource "aws_redshift_cluster" "default" {
   cluster_identifier = "tf-redshift-cluster-%d"
   availability_zone = "us-west-2a"
@@ -339,11 +366,41 @@ resource "aws_redshift_cluster" "default" {
   allow_version_upgrade = false
 }`

-var testAccAWSRedshiftClusterConfig_notPubliclyAccessible = `
-provider "aws" {
-  region = "us-west-2"
-}
+var testAccAWSRedshiftClusterConfig_tags = `
+resource "aws_redshift_cluster" "default" {
+  cluster_identifier = "tf-redshift-cluster-%d"
+  availability_zone = "us-west-2a"
+  database_name = "mydb"
+  master_username = "foo"
+  master_password = "Mustbe8characters"
+  node_type = "dc1.large"
+  automated_snapshot_retention_period = 7
+  allow_version_upgrade = false

+  tags {
+    environment = "Production"
+    cluster = "reader"
+    Type = "master"
+  }
+}`
+
+var testAccAWSRedshiftClusterConfig_updatedTags = `
+resource "aws_redshift_cluster" "default" {
+  cluster_identifier = "tf-redshift-cluster-%d"
+  availability_zone = "us-west-2a"
+  database_name = "mydb"
+  master_username = "foo"
+  master_password = "Mustbe8characters"
+  node_type = "dc1.large"
+  automated_snapshot_retention_period = 7
+  allow_version_upgrade = false
+
+  tags {
+    environment = "Production"
+  }
+}`
+
+var testAccAWSRedshiftClusterConfig_notPubliclyAccessible = `
 resource "aws_vpc" "foo" {
   cidr_block = "10.1.0.0/16"
 }
@@ -402,10 +459,6 @@ resource "aws_redshift_cluster" "default" {
 }`

 var testAccAWSRedshiftClusterConfig_updatePubliclyAccessible = `
-provider "aws" {
-  region = "us-west-2"
-}
-
 resource "aws_vpc" "foo" {
   cidr_block = "10.1.0.0/16"
 }
@@ -1,10 +1,71 @@
 package aws

 import (
+	"log"
+
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/redshift"
+	"github.com/hashicorp/terraform/helper/schema"
 )

+func setTagsRedshift(conn *redshift.Redshift, d *schema.ResourceData, arn string) error {
+	if d.HasChange("tags") {
+		oraw, nraw := d.GetChange("tags")
+		o := oraw.(map[string]interface{})
+		n := nraw.(map[string]interface{})
+		create, remove := diffTagsRedshift(tagsFromMapRedshift(o), tagsFromMapRedshift(n))
+
+		// Set tags
+		if len(remove) > 0 {
+			log.Printf("[DEBUG] Removing tags: %#v", remove)
+			k := make([]*string, len(remove), len(remove))
+			for i, t := range remove {
+				k[i] = t.Key
+			}
+
+			_, err := conn.DeleteTags(&redshift.DeleteTagsInput{
+				ResourceName: aws.String(arn),
+				TagKeys:      k,
+			})
+			if err != nil {
+				return err
+			}
+		}
+		if len(create) > 0 {
+			log.Printf("[DEBUG] Creating tags: %#v", create)
+			_, err := conn.CreateTags(&redshift.CreateTagsInput{
+				ResourceName: aws.String(arn),
+				Tags:         create,
+			})
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func diffTagsRedshift(oldTags, newTags []*redshift.Tag) ([]*redshift.Tag, []*redshift.Tag) {
+	// First, we're creating everything we have
+	create := make(map[string]interface{})
+	for _, t := range newTags {
+		create[*t.Key] = *t.Value
+	}
+
+	// Build the list of what to remove
+	var remove []*redshift.Tag
+	for _, t := range oldTags {
+		old, ok := create[*t.Key]
+		if !ok || old != *t.Value {
+			// Delete it!
+			remove = append(remove, t)
+		}
+	}
+
+	return tagsFromMapRedshift(create), remove
+}
+
 func tagsFromMapRedshift(m map[string]interface{}) []*redshift.Tag {
 	result := make([]*redshift.Tag, 0, len(m))
 	for k, v := range m {
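In configuration terms, the helpers above mean a change to the tags map is applied in place through the resource's Update path: diffTagsRedshift puts every key in the new map into the create set and every key that disappeared or changed value into the remove set, and setTagsRedshift then calls DeleteTags and CreateTags against the ARN from buildRedshiftARN. A hypothetical before/after sketch of just the tags argument (values illustrative):

  # before
  tags {
    environment = "Production"
    cluster     = "reader"
  }

  # after: "cluster" is gone, so it lands in the remove set (DeleteTags);
  # every key in the new map, including the unchanged "environment",
  # lands in the create set (CreateTags), which simply re-applies it.
  tags {
    environment = "Production"
    team        = "analytics"
  }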
@@ -0,0 +1,54 @@
+package aws
+
+import (
+	"reflect"
+	"testing"
+)
+
+func TestDiffRedshiftTags(t *testing.T) {
+	cases := []struct {
+		Old, New       map[string]interface{}
+		Create, Remove map[string]string
+	}{
+		{
+			Old: map[string]interface{}{
+				"foo": "bar",
+			},
+			New: map[string]interface{}{
+				"bar": "baz",
+			},
+			Create: map[string]string{
+				"bar": "baz",
+			},
+			Remove: map[string]string{
+				"foo": "bar",
+			},
+		},
+		{
+			Old: map[string]interface{}{
+				"foo": "bar",
+			},
+			New: map[string]interface{}{
+				"foo": "baz",
+			},
+			Create: map[string]string{
+				"foo": "baz",
+			},
+			Remove: map[string]string{
+				"foo": "bar",
+			},
+		},
+	}
+
+	for i, tc := range cases {
+		c, r := diffTagsRedshift(tagsFromMapRedshift(tc.Old), tagsFromMapRedshift(tc.New))
+		cm := tagsToMapRedshift(c)
+		rm := tagsToMapRedshift(r)
+		if !reflect.DeepEqual(cm, tc.Create) {
+			t.Fatalf("%d: bad create: %#v", i, cm)
+		}
+		if !reflect.DeepEqual(rm, tc.Remove) {
+			t.Fatalf("%d: bad remove: %#v", i, rm)
+		}
+	}
+}
@@ -56,6 +56,7 @@ string.
 * `elastic_ip` - (Optional) The Elastic IP (EIP) address for the cluster.
 * `skip_final_snapshot` - (Optional) Determines whether a final snapshot of the cluster is created before Amazon Redshift deletes the cluster. If true , a final cluster snapshot is not created. If false , a final cluster snapshot is created before the cluster is deleted. Default is true.
 * `final_snapshot_identifier` - (Optional) The identifier of the final snapshot that is to be created immediately before deleting the cluster. If this parameter is provided, `skip_final_snapshot` must be false.
+* `tags` - (Optional) A mapping of tags to assign to the resource.

 ## Attributes Reference
@@ -79,4 +80,3 @@ The following attributes are exported:
 * `cluster_subnet_group_name` - The name of a cluster subnet group to be associated with this cluster
 * `cluster_public_key` - The public key for the cluster
 * `cluster_revision_number` - The specific revision number of the database in the cluster
-