provider/aws: Add support to `aws_redshift_cluster` for restoring from snapshot (#8414)

Fixes #6406

Adds 2 new parameters:

* `snapshot_identifier`
* `snapshot_cluster_identifier`

These allow the Redshift cluster to be restored from a pre-existing snapshot.
This change also makes the Redshift master username and password fields
optional, as they are not required when restoring from a snapshot.
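
For example, a restore that also names the source cluster (useful when an IAM policy scopes snapshot access to a particular cluster) might look like the following sketch; the identifiers are hypothetical:

```
resource "aws_redshift_cluster" "restore" {
  cluster_identifier          = "my-restored-cluster" # hypothetical
  snapshot_identifier         = "my-source-snapshot"  # hypothetical
  snapshot_cluster_identifier = "my-source-cluster"   # hypothetical
  node_type                   = "dc1.large"
}
```

Note that `master_username` and `master_password` are omitted: a cluster restored from a snapshot inherits its credentials from the snapshot's source cluster.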

```
% make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSRedshiftCluster_'
==> Checking that code complies with gofmt requirements...
/Users/stacko/Code/go/bin/stringer
go generate $(go list ./... | grep -v /terraform/vendor/)
2016/08/23 12:04:53 Generated command/internal_plugin_list.go
TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSRedshiftCluster_ -timeout 120m
=== RUN   TestAccAWSRedshiftCluster_importBasic
--- PASS: TestAccAWSRedshiftCluster_importBasic (741.03s)
=== RUN   TestAccAWSRedshiftCluster_basic
--- PASS: TestAccAWSRedshiftCluster_basic (656.33s)
=== RUN   TestAccAWSRedshiftCluster_loggingEnabled
--- PASS: TestAccAWSRedshiftCluster_loggingEnabled (718.65s)
=== RUN   TestAccAWSRedshiftCluster_iamRoles
--- PASS: TestAccAWSRedshiftCluster_iamRoles (818.10s)
=== RUN   TestAccAWSRedshiftCluster_publiclyAccessible
--- PASS: TestAccAWSRedshiftCluster_publiclyAccessible (853.30s)
=== RUN   TestAccAWSRedshiftCluster_updateNodeCount
--- PASS: TestAccAWSRedshiftCluster_updateNodeCount (2083.37s)
=== RUN   TestAccAWSRedshiftCluster_tags
--- PASS: TestAccAWSRedshiftCluster_tags (621.15s)
PASS
ok      github.com/hashicorp/terraform/builtin/providers/aws   6491.963s
```

When deploying this, I was able to use the following config:

```
resource "aws_redshift_cluster" "restore" {
  cluster_identifier = "my-test-restored-cluster"
  snapshot_identifier = "sample-snapshot-for-restore"
  node_type = "dc1.large"
}
```

And it resulted in:

```
terraform apply
[WARN] /Users/stacko/Code/go/bin/terraform-provider-aws overrides an internal plugin for aws-provider.
  If you did not expect to see this message you will need to remove the old plugin.
  See https://www.terraform.io/docs/internals/internal-plugins.html
aws_redshift_cluster.restore: Creating...
  allow_version_upgrade:               "" => "true"
  automated_snapshot_retention_period: "" => "1"
  availability_zone:                   "" => "<computed>"
  bucket_name:                         "" => "<computed>"
  cluster_identifier:                  "" => "my-test-restored-cluster"
  cluster_parameter_group_name:        "" => "<computed>"
  cluster_public_key:                  "" => "<computed>"
  cluster_revision_number:             "" => "<computed>"
  cluster_security_groups.#:           "" => "<computed>"
  cluster_subnet_group_name:           "" => "<computed>"
  cluster_type:                        "" => "<computed>"
  cluster_version:                     "" => "1.0"
  database_name:                       "" => "<computed>"
  enable_logging:                      "" => "false"
  encrypted:                           "" => "<computed>"
  endpoint:                            "" => "<computed>"
  iam_roles.#:                         "" => "<computed>"
  kms_key_id:                          "" => "<computed>"
  node_type:                           "" => "dc1.large"
  number_of_nodes:                     "" => "1"
  port:                                "" => "5439"
  preferred_maintenance_window:        "" => "<computed>"
  publicly_accessible:                 "" => "true"
  s3_key_prefix:                       "" => "<computed>"
  skip_final_snapshot:                 "" => "true"
  snapshot_identifier:                 "" => "sample-snapshot-for-restore"
  vpc_security_group_ids.#:            "" => "<computed>"
..........
aws_redshift_cluster.restore: Still creating... (5m20s elapsed)
aws_redshift_cluster.restore: Creation complete

Apply complete! Resources: 1 added, 0 changed, 0 destroyed.
```
Commit 3a2d73a2b1 (parent 50bb092a68) by Paul Stack, 2016-08-24 10:58:05 +01:00. 2 changed files with 140 additions and 65 deletions.

The schema changes make `master_username` and `master_password` optional (marking the password as sensitive) and add the two new snapshot attributes:
```
@@ -51,13 +51,14 @@ func resourceAwsRedshiftCluster() *schema.Resource {
 			"master_username": &schema.Schema{
 				Type:         schema.TypeString,
-				Required:     true,
+				Optional:     true,
 				ValidateFunc: validateRedshiftClusterMasterUsername,
 			},

 			"master_password": &schema.Schema{
 				Type:     schema.TypeString,
-				Required: true,
+				Optional:  true,
+				Sensitive: true,
 			},

 			"cluster_security_groups": &schema.Schema{
@@ -225,6 +226,16 @@ func resourceAwsRedshiftCluster() *schema.Resource {
 				Computed: true,
 			},

+			"snapshot_identifier": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+			},
+
+			"snapshot_cluster_identifier": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+			},
+
 			"tags": tagsSchema(),
 		},
 	}
```
The create function (hunk `@@ -241,89 +252,151 @@`) now branches on `snapshot_identifier`: when it is set, the provider calls `RestoreFromClusterSnapshot` instead of `CreateCluster`, the existing create logic moves into the `else` branch, the state waiter gains a `restoring` pending state, and `err` is now declared at the waiter since each branch scopes its own response. The resulting function:

```
func resourceAwsRedshiftClusterCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).redshiftconn
	log.Printf("[INFO] Building Redshift Cluster Options")
	tags := tagsFromMapRedshift(d.Get("tags").(map[string]interface{}))

	if v, ok := d.GetOk("snapshot_identifier"); ok {
		restoreOpts := &redshift.RestoreFromClusterSnapshotInput{
			ClusterIdentifier:                aws.String(d.Get("cluster_identifier").(string)),
			SnapshotIdentifier:               aws.String(v.(string)),
			Port:                             aws.Int64(int64(d.Get("port").(int))),
			AllowVersionUpgrade:              aws.Bool(d.Get("allow_version_upgrade").(bool)),
			NodeType:                         aws.String(d.Get("node_type").(string)),
			PubliclyAccessible:               aws.Bool(d.Get("publicly_accessible").(bool)),
			AutomatedSnapshotRetentionPeriod: aws.Int64(int64(d.Get("automated_snapshot_retention_period").(int))),
		}

		if v, ok := d.GetOk("snapshot_cluster_identifier"); ok {
			restoreOpts.SnapshotClusterIdentifier = aws.String(v.(string))
		}

		if v, ok := d.GetOk("availability_zone"); ok {
			restoreOpts.AvailabilityZone = aws.String(v.(string))
		}

		if v, ok := d.GetOk("cluster_subnet_group_name"); ok {
			restoreOpts.ClusterSubnetGroupName = aws.String(v.(string))
		}

		if v, ok := d.GetOk("cluster_parameter_group_name"); ok {
			restoreOpts.ClusterParameterGroupName = aws.String(v.(string))
		}

		if v := d.Get("cluster_security_groups").(*schema.Set); v.Len() > 0 {
			restoreOpts.ClusterSecurityGroups = expandStringList(v.List())
		}

		if v := d.Get("vpc_security_group_ids").(*schema.Set); v.Len() > 0 {
			restoreOpts.VpcSecurityGroupIds = expandStringList(v.List())
		}

		if v, ok := d.GetOk("preferred_maintenance_window"); ok {
			restoreOpts.PreferredMaintenanceWindow = aws.String(v.(string))
		}

		if v, ok := d.GetOk("kms_key_id"); ok {
			restoreOpts.KmsKeyId = aws.String(v.(string))
		}

		if v, ok := d.GetOk("elastic_ip"); ok {
			restoreOpts.ElasticIp = aws.String(v.(string))
		}

		if v, ok := d.GetOk("iam_roles"); ok {
			restoreOpts.IamRoles = expandStringList(v.(*schema.Set).List())
		}

		log.Printf("[DEBUG] Redshift Cluster restore cluster options: %s", restoreOpts)

		resp, err := conn.RestoreFromClusterSnapshot(restoreOpts)
		if err != nil {
			log.Printf("[ERROR] Error Restoring Redshift Cluster from Snapshot: %s", err)
			return err
		}

		d.SetId(*resp.Cluster.ClusterIdentifier)
	} else {
		createOpts := &redshift.CreateClusterInput{
			ClusterIdentifier:                aws.String(d.Get("cluster_identifier").(string)),
			Port:                             aws.Int64(int64(d.Get("port").(int))),
			MasterUserPassword:               aws.String(d.Get("master_password").(string)),
			MasterUsername:                   aws.String(d.Get("master_username").(string)),
			ClusterVersion:                   aws.String(d.Get("cluster_version").(string)),
			NodeType:                         aws.String(d.Get("node_type").(string)),
			DBName:                           aws.String(d.Get("database_name").(string)),
			AllowVersionUpgrade:              aws.Bool(d.Get("allow_version_upgrade").(bool)),
			PubliclyAccessible:               aws.Bool(d.Get("publicly_accessible").(bool)),
			AutomatedSnapshotRetentionPeriod: aws.Int64(int64(d.Get("automated_snapshot_retention_period").(int))),
			Tags:                             tags,
		}

		if v := d.Get("number_of_nodes").(int); v > 1 {
			createOpts.ClusterType = aws.String("multi-node")
			createOpts.NumberOfNodes = aws.Int64(int64(d.Get("number_of_nodes").(int)))
		} else {
			createOpts.ClusterType = aws.String("single-node")
		}

		if v := d.Get("cluster_security_groups").(*schema.Set); v.Len() > 0 {
			createOpts.ClusterSecurityGroups = expandStringList(v.List())
		}

		if v := d.Get("vpc_security_group_ids").(*schema.Set); v.Len() > 0 {
			createOpts.VpcSecurityGroupIds = expandStringList(v.List())
		}

		if v, ok := d.GetOk("cluster_subnet_group_name"); ok {
			createOpts.ClusterSubnetGroupName = aws.String(v.(string))
		}

		if v, ok := d.GetOk("availability_zone"); ok {
			createOpts.AvailabilityZone = aws.String(v.(string))
		}

		if v, ok := d.GetOk("preferred_maintenance_window"); ok {
			createOpts.PreferredMaintenanceWindow = aws.String(v.(string))
		}

		if v, ok := d.GetOk("cluster_parameter_group_name"); ok {
			createOpts.ClusterParameterGroupName = aws.String(v.(string))
		}

		if v, ok := d.GetOk("encrypted"); ok {
			createOpts.Encrypted = aws.Bool(v.(bool))
		}

		if v, ok := d.GetOk("kms_key_id"); ok {
			createOpts.KmsKeyId = aws.String(v.(string))
		}

		if v, ok := d.GetOk("elastic_ip"); ok {
			createOpts.ElasticIp = aws.String(v.(string))
		}

		if v, ok := d.GetOk("iam_roles"); ok {
			createOpts.IamRoles = expandStringList(v.(*schema.Set).List())
		}

		log.Printf("[DEBUG] Redshift Cluster create options: %s", createOpts)
		resp, err := conn.CreateCluster(createOpts)
		if err != nil {
			log.Printf("[ERROR] Error creating Redshift Cluster: %s", err)
			return err
		}

		log.Printf("[DEBUG]: Cluster create response: %s", resp)
		d.SetId(*resp.Cluster.ClusterIdentifier)
	}

	stateConf := &resource.StateChangeConf{
		Pending:    []string{"creating", "backing-up", "modifying", "restoring"},
		Target:     []string{"available"},
		Refresh:    resourceAwsRedshiftClusterStateRefreshFunc(d, meta),
		Timeout:    40 * time.Minute,
		MinTimeout: 10 * time.Second,
	}

	_, err := stateConf.WaitForState()
	if err != nil {
		return fmt.Errorf("[WARN] Error waiting for Redshift Cluster state to be \"available\": %s", err)
	}
	// remainder of the function is unchanged by this commit
```
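
Because the restore path forwards the usual optional settings (availability zone, subnet group, security groups, maintenance window, KMS key, Elastic IP, IAM roles), placement and networking can be configured on a restore just as on a fresh create. A sketch with hypothetical names:

```
resource "aws_redshift_cluster" "restore" {
  cluster_identifier        = "my-restored-cluster" # hypothetical
  snapshot_identifier       = "my-source-snapshot"  # hypothetical
  node_type                 = "dc1.large"
  availability_zone         = "us-east-1a"          # hypothetical
  cluster_subnet_group_name = "my-subnet-group"     # hypothetical
  vpc_security_group_ids    = ["sg-12345678"]       # hypothetical
  publicly_accessible       = false
}
```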

And the corresponding documentation updates:

```
@@ -32,8 +32,8 @@ The following arguments are supported:
 string.
 * `database_name` - (Optional) The name of the first database to be created when the cluster is created.
 If you do not provide a name, Amazon Redshift will create a default database called `dev`.
-* `node_type` - (Required) The node type to be provisioned for the cluster.
-* `master_password` - (Required) Password for the master DB user. Note that this may
+* `node_type` - (Optional) The node type to be provisioned for the cluster.
+* `master_password` - (Optional) Password for the master DB user. Note that this may
 show up in logs, and it will be stored in the state file
 * `master_username` - (Required) Username for the master DB user
 * `cluster_security_groups` - (Optional) A list of security groups to be associated with this cluster.
@@ -61,6 +61,8 @@ string.
 * `bucket_name` - (Optional, required when `enable_logging` is `true`) The name of an existing S3 bucket where the log files are to be stored. Must be in the same region as the cluster and the cluster must have read bucket and put object permissions.
 For more information on the permissions required for the bucket, please read the AWS [documentation](http://docs.aws.amazon.com/redshift/latest/mgmt/db-auditing.html#db-auditing-enable-logging)
 * `s3_key_prefix` - (Optional) The prefix applied to the log file names.
+* `snapshot_identifier` - (Optional) The name of the snapshot from which to create the new cluster.
+* `snapshot_cluster_identifier` - (Optional) The name of the cluster the source snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.
 * `tags` - (Optional) A mapping of tags to assign to the resource.
```