diff --git a/builtin/providers/aws/resource_aws_redshift_cluster.go b/builtin/providers/aws/resource_aws_redshift_cluster.go
index b8fc2b874..54e270f70 100644
--- a/builtin/providers/aws/resource_aws_redshift_cluster.go
+++ b/builtin/providers/aws/resource_aws_redshift_cluster.go
@@ -51,13 +51,14 @@ func resourceAwsRedshiftCluster() *schema.Resource {
 
 			"master_username": &schema.Schema{
 				Type:         schema.TypeString,
-				Required:     true,
+				Optional:     true,
 				ValidateFunc: validateRedshiftClusterMasterUsername,
 			},
 
 			"master_password": &schema.Schema{
-				Type:     schema.TypeString,
-				Required: true,
+				Type:      schema.TypeString,
+				Optional:  true,
+				Sensitive: true,
 			},
 
 			"cluster_security_groups": &schema.Schema{
@@ -225,6 +226,16 @@ func resourceAwsRedshiftCluster() *schema.Resource {
 				Computed: true,
 			},
 
+			"snapshot_identifier": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+			},
+
+			"snapshot_cluster_identifier": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+			},
+
 			"tags": tagsSchema(),
 		},
 	}
@@ -241,89 +252,151 @@ func resourceAwsRedshiftClusterImport(
 
 func resourceAwsRedshiftClusterCreate(d *schema.ResourceData, meta interface{}) error {
 	conn := meta.(*AWSClient).redshiftconn
-
-	log.Printf("[INFO] Building Redshift Cluster Options")
 	tags := tagsFromMapRedshift(d.Get("tags").(map[string]interface{}))
-	createOpts := &redshift.CreateClusterInput{
-		ClusterIdentifier:   aws.String(d.Get("cluster_identifier").(string)),
-		Port:                aws.Int64(int64(d.Get("port").(int))),
-		MasterUserPassword:  aws.String(d.Get("master_password").(string)),
-		MasterUsername:      aws.String(d.Get("master_username").(string)),
-		ClusterVersion:      aws.String(d.Get("cluster_version").(string)),
-		NodeType:            aws.String(d.Get("node_type").(string)),
-		DBName:              aws.String(d.Get("database_name").(string)),
-		AllowVersionUpgrade: aws.Bool(d.Get("allow_version_upgrade").(bool)),
-		PubliclyAccessible:  aws.Bool(d.Get("publicly_accessible").(bool)),
-		AutomatedSnapshotRetentionPeriod: aws.Int64(int64(d.Get("automated_snapshot_retention_period").(int))),
-		Tags: tags,
-	}
 
-	if v := d.Get("number_of_nodes").(int); v > 1 {
-		createOpts.ClusterType = aws.String("multi-node")
-		createOpts.NumberOfNodes = aws.Int64(int64(d.Get("number_of_nodes").(int)))
+	if v, ok := d.GetOk("snapshot_identifier"); ok {
+		restoreOpts := &redshift.RestoreFromClusterSnapshotInput{
+			ClusterIdentifier:   aws.String(d.Get("cluster_identifier").(string)),
+			SnapshotIdentifier:  aws.String(v.(string)),
+			Port:                aws.Int64(int64(d.Get("port").(int))),
+			AllowVersionUpgrade: aws.Bool(d.Get("allow_version_upgrade").(bool)),
+			NodeType:            aws.String(d.Get("node_type").(string)),
+			PubliclyAccessible:  aws.Bool(d.Get("publicly_accessible").(bool)),
+			AutomatedSnapshotRetentionPeriod: aws.Int64(int64(d.Get("automated_snapshot_retention_period").(int))),
+		}
+
+		if v, ok := d.GetOk("snapshot_cluster_identifier"); ok {
+			restoreOpts.SnapshotClusterIdentifier = aws.String(v.(string))
+		}
+
+		if v, ok := d.GetOk("availability_zone"); ok {
+			restoreOpts.AvailabilityZone = aws.String(v.(string))
+		}
+
+		if v, ok := d.GetOk("cluster_subnet_group_name"); ok {
+			restoreOpts.ClusterSubnetGroupName = aws.String(v.(string))
+		}
+
+		if v, ok := d.GetOk("cluster_parameter_group_name"); ok {
+			restoreOpts.ClusterParameterGroupName = aws.String(v.(string))
+		}
+
+		if v := d.Get("cluster_security_groups").(*schema.Set); v.Len() > 0 {
+			restoreOpts.ClusterSecurityGroups = expandStringList(v.List())
+		}
+
+		if v := d.Get("vpc_security_group_ids").(*schema.Set); v.Len() > 0 {
+			restoreOpts.VpcSecurityGroupIds = expandStringList(v.List())
+		}
+
+		if v, ok := d.GetOk("preferred_maintenance_window"); ok {
+			restoreOpts.PreferredMaintenanceWindow = aws.String(v.(string))
+		}
+
+		if v, ok := d.GetOk("kms_key_id"); ok {
+			restoreOpts.KmsKeyId = aws.String(v.(string))
+		}
+
+		if v, ok := d.GetOk("elastic_ip"); ok {
+			restoreOpts.ElasticIp = aws.String(v.(string))
+		}
+
+		if v, ok := d.GetOk("iam_roles"); ok {
+			restoreOpts.IamRoles = expandStringList(v.(*schema.Set).List())
+		}
+
+		log.Printf("[DEBUG] Redshift Cluster restore cluster options: %s", restoreOpts)
+
+		resp, err := conn.RestoreFromClusterSnapshot(restoreOpts)
+		if err != nil {
+			log.Printf("[ERROR] Error Restoring Redshift Cluster from Snapshot: %s", err)
+			return err
+		}
+
+		d.SetId(*resp.Cluster.ClusterIdentifier)
+
 	} else {
-		createOpts.ClusterType = aws.String("single-node")
-	}
+		createOpts := &redshift.CreateClusterInput{
+			ClusterIdentifier:   aws.String(d.Get("cluster_identifier").(string)),
+			Port:                aws.Int64(int64(d.Get("port").(int))),
+			MasterUserPassword:  aws.String(d.Get("master_password").(string)),
+			MasterUsername:      aws.String(d.Get("master_username").(string)),
+			ClusterVersion:      aws.String(d.Get("cluster_version").(string)),
+			NodeType:            aws.String(d.Get("node_type").(string)),
+			DBName:              aws.String(d.Get("database_name").(string)),
+			AllowVersionUpgrade: aws.Bool(d.Get("allow_version_upgrade").(bool)),
+			PubliclyAccessible:  aws.Bool(d.Get("publicly_accessible").(bool)),
+			AutomatedSnapshotRetentionPeriod: aws.Int64(int64(d.Get("automated_snapshot_retention_period").(int))),
+			Tags: tags,
+		}
 
-	if v := d.Get("cluster_security_groups").(*schema.Set); v.Len() > 0 {
-		createOpts.ClusterSecurityGroups = expandStringList(v.List())
-	}
+		if v := d.Get("number_of_nodes").(int); v > 1 {
+			createOpts.ClusterType = aws.String("multi-node")
+			createOpts.NumberOfNodes = aws.Int64(int64(d.Get("number_of_nodes").(int)))
+		} else {
+			createOpts.ClusterType = aws.String("single-node")
+		}
 
-	if v := d.Get("vpc_security_group_ids").(*schema.Set); v.Len() > 0 {
-		createOpts.VpcSecurityGroupIds = expandStringList(v.List())
-	}
+		if v := d.Get("cluster_security_groups").(*schema.Set); v.Len() > 0 {
+			createOpts.ClusterSecurityGroups = expandStringList(v.List())
+		}
 
-	if v, ok := d.GetOk("cluster_subnet_group_name"); ok {
-		createOpts.ClusterSubnetGroupName = aws.String(v.(string))
-	}
+		if v := d.Get("vpc_security_group_ids").(*schema.Set); v.Len() > 0 {
+			createOpts.VpcSecurityGroupIds = expandStringList(v.List())
+		}
 
-	if v, ok := d.GetOk("availability_zone"); ok {
-		createOpts.AvailabilityZone = aws.String(v.(string))
-	}
+		if v, ok := d.GetOk("cluster_subnet_group_name"); ok {
+			createOpts.ClusterSubnetGroupName = aws.String(v.(string))
+		}
 
-	if v, ok := d.GetOk("preferred_maintenance_window"); ok {
-		createOpts.PreferredMaintenanceWindow = aws.String(v.(string))
-	}
+		if v, ok := d.GetOk("availability_zone"); ok {
+			createOpts.AvailabilityZone = aws.String(v.(string))
+		}
 
-	if v, ok := d.GetOk("cluster_parameter_group_name"); ok {
-		createOpts.ClusterParameterGroupName = aws.String(v.(string))
-	}
+		if v, ok := d.GetOk("preferred_maintenance_window"); ok {
+			createOpts.PreferredMaintenanceWindow = aws.String(v.(string))
+		}
 
-	if v, ok := d.GetOk("encrypted"); ok {
-		createOpts.Encrypted = aws.Bool(v.(bool))
-	}
+		if v, ok := d.GetOk("cluster_parameter_group_name"); ok {
+			createOpts.ClusterParameterGroupName = aws.String(v.(string))
+		}
 
-	if v, ok := d.GetOk("kms_key_id"); ok {
-		createOpts.KmsKeyId = aws.String(v.(string))
-	}
+		if v, ok := d.GetOk("encrypted"); ok {
+			createOpts.Encrypted = aws.Bool(v.(bool))
+		}
 
-	if v, ok := d.GetOk("elastic_ip"); ok {
-		createOpts.ElasticIp = aws.String(v.(string))
-	}
+		if v, ok := d.GetOk("kms_key_id"); ok {
+			createOpts.KmsKeyId = aws.String(v.(string))
+		}
 
-	if v, ok := d.GetOk("iam_roles"); ok {
-		createOpts.IamRoles = expandStringList(v.(*schema.Set).List())
-	}
+		if v, ok := d.GetOk("elastic_ip"); ok {
+			createOpts.ElasticIp = aws.String(v.(string))
+		}
 
-	log.Printf("[DEBUG] Redshift Cluster create options: %s", createOpts)
-	resp, err := conn.CreateCluster(createOpts)
-	if err != nil {
-		log.Printf("[ERROR] Error creating Redshift Cluster: %s", err)
-		return err
-	}
+		if v, ok := d.GetOk("iam_roles"); ok {
+			createOpts.IamRoles = expandStringList(v.(*schema.Set).List())
+		}
 
-	log.Printf("[DEBUG]: Cluster create response: %s", resp)
-	d.SetId(*resp.Cluster.ClusterIdentifier)
+		log.Printf("[DEBUG] Redshift Cluster create options: %s", createOpts)
+		resp, err := conn.CreateCluster(createOpts)
+		if err != nil {
+			log.Printf("[ERROR] Error creating Redshift Cluster: %s", err)
+			return err
+		}
+
+		log.Printf("[DEBUG]: Cluster create response: %s", resp)
+		d.SetId(*resp.Cluster.ClusterIdentifier)
+	}
 
 	stateConf := &resource.StateChangeConf{
-		Pending:    []string{"creating", "backing-up", "modifying"},
+		Pending:    []string{"creating", "backing-up", "modifying", "restoring"},
 		Target:     []string{"available"},
 		Refresh:    resourceAwsRedshiftClusterStateRefreshFunc(d, meta),
 		Timeout:    40 * time.Minute,
 		MinTimeout: 10 * time.Second,
 	}
 
-	_, err = stateConf.WaitForState()
+	_, err := stateConf.WaitForState()
 	if err != nil {
 		return fmt.Errorf("[WARN] Error waiting for Redshift Cluster state to be \"available\": %s", err)
 	}
diff --git a/website/source/docs/providers/aws/r/redshift_cluster.html.markdown b/website/source/docs/providers/aws/r/redshift_cluster.html.markdown
index d6a697e7a..b0f96e88f 100644
--- a/website/source/docs/providers/aws/r/redshift_cluster.html.markdown
+++ b/website/source/docs/providers/aws/r/redshift_cluster.html.markdown
@@ -32,8 +32,8 @@ The following arguments are supported:
 string.
 * `database_name` - (Optional) The name of the first database to be created when the
     cluster is created. If you do not provide a name, Amazon Redshift will create a default database called `dev`.
-* `node_type` - (Required) The node type to be provisioned for the cluster.
-* `master_password` - (Required) Password for the master DB user. Note that this may
+* `node_type` - (Optional) The node type to be provisioned for the cluster.
+* `master_password` - (Optional) Password for the master DB user. Note that this may
     show up in logs, and it will be stored in the state file
 * `master_username` - (Required) Username for the master DB user
 * `cluster_security_groups` - (Optional) A list of security groups to be associated with this cluster.
@@ -61,6 +61,8 @@ string.
 * `bucket_name` - (Optional, required when `enable_logging` is `true`) The name of an existing S3 bucket where the log files are to be stored. Must be in the same region as the cluster and the cluster must have read bucket and put object permissions. For more information on the permissions required for the bucket, please read the AWS [documentation](http://docs.aws.amazon.com/redshift/latest/mgmt/db-auditing.html#db-auditing-enable-logging)
 * `s3_key_prefix` - (Optional) The prefix applied to the log file names.
+* `snapshot_identifier` - (Optional) The name of the snapshot from which to create the new cluster.
+* `snapshot_cluster_identifier` - (Optional) The name of the cluster the source snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.
 * `tags` - (Optional) A mapping of tags to assign to the resource.
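
With this change, a cluster can be stood up from an existing snapshot instead of being provisioned from scratch: when `snapshot_identifier` is set, the create path calls `RestoreFromClusterSnapshot` rather than `CreateCluster`, and the state waiter additionally accepts the `restoring` status on the way to `available`. A minimal configuration exercising the restore path might look like the sketch below; all identifiers are hypothetical placeholders.

```hcl
# Hypothetical example: restore a new cluster from an existing snapshot.
resource "aws_redshift_cluster" "restored" {
  cluster_identifier = "tf-redshift-restored"
  node_type          = "dc1.large"

  # Setting snapshot_identifier switches the provider to the
  # RestoreFromClusterSnapshot path, so master_username and
  # master_password (now Optional) can be omitted.
  snapshot_identifier         = "example-snapshot"
  snapshot_cluster_identifier = "example-source-cluster"
}
```

When `snapshot_identifier` is unset, behaviour is unchanged and the cluster is created through `CreateCluster` as before.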