Merge branch 'pr-10113'

* pr-10113:
  Added retry_option for aws_kinesis_firehose_stream redshift_configuration
This commit is contained in:
clint shryock 2016-11-15 10:56:16 -06:00
commit 0369112057
2 changed files with 30 additions and 4 deletions

View File

@ -195,6 +195,20 @@ func resourceAwsKinesisFirehoseDeliveryStream() *schema.Resource {
Required: true,
},
"retry_duration": {
Type: schema.TypeInt,
Optional: true,
Default: 3600,
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
value := v.(int)
if value < 0 || value > 7200 {
errors = append(errors, fmt.Errorf(
"%q must be in the range from 0 to 7200 seconds.", k))
}
return
},
},
"copy_options": {
Type: schema.TypeString,
Optional: true,
@ -446,6 +460,7 @@ func createRedshiftConfig(d *schema.ResourceData, s3Config *firehose.S3Destinati
configuration := &firehose.RedshiftDestinationConfiguration{
ClusterJDBCURL: aws.String(redshift["cluster_jdbcurl"].(string)),
RetryOptions: extractRedshiftRetryOptions(redshift),
Password: aws.String(redshift["password"].(string)),
Username: aws.String(redshift["username"].(string)),
RoleARN: aws.String(redshift["role_arn"].(string)),
@ -471,6 +486,7 @@ func updateRedshiftConfig(d *schema.ResourceData, s3Update *firehose.S3Destinati
configuration := &firehose.RedshiftDestinationUpdate{
ClusterJDBCURL: aws.String(redshift["cluster_jdbcurl"].(string)),
RetryOptions: extractRedshiftRetryOptions(redshift),
Password: aws.String(redshift["password"].(string)),
Username: aws.String(redshift["username"].(string)),
RoleARN: aws.String(redshift["role_arn"].(string)),
@ -498,7 +514,7 @@ func createElasticsearchConfig(d *schema.ResourceData, s3Config *firehose.S3Dest
BufferingHints: extractBufferingHints(es),
DomainARN: aws.String(es["domain_arn"].(string)),
IndexName: aws.String(es["index_name"].(string)),
RetryOptions: extractRetryOptions(es),
RetryOptions: extractElasticSearchRetryOptions(es),
RoleARN: aws.String(es["role_arn"].(string)),
TypeName: aws.String(es["type_name"].(string)),
S3Configuration: s3Config,
@ -531,7 +547,7 @@ func updateElasticsearchConfig(d *schema.ResourceData, s3Update *firehose.S3Dest
BufferingHints: extractBufferingHints(es),
DomainARN: aws.String(es["domain_arn"].(string)),
IndexName: aws.String(es["index_name"].(string)),
RetryOptions: extractRetryOptions(es),
RetryOptions: extractElasticSearchRetryOptions(es),
RoleARN: aws.String(es["role_arn"].(string)),
TypeName: aws.String(es["type_name"].(string)),
S3Update: s3Update,
@ -559,10 +575,9 @@ func extractBufferingHints(es map[string]interface{}) *firehose.ElasticsearchBuf
}
return bufferingHints
}
func extractRetryOptions(es map[string]interface{}) *firehose.ElasticsearchRetryOptions {
func extractElasticSearchRetryOptions(es map[string]interface{}) *firehose.ElasticsearchRetryOptions {
retryOptions := &firehose.ElasticsearchRetryOptions{}
if retryDuration, ok := es["retry_duration"].(int); ok {
@ -572,6 +587,16 @@ func extractRetryOptions(es map[string]interface{}) *firehose.ElasticsearchRetry
return retryOptions
}
// extractRedshiftRetryOptions builds the Redshift retry configuration from the
// resource's "retry_duration" attribute. When the attribute is absent (or not
// an int), DurationInSeconds is left unset so the AWS API default applies.
func extractRedshiftRetryOptions(redshift map[string]interface{}) *firehose.RedshiftRetryOptions {
	opts := &firehose.RedshiftRetryOptions{}
	if duration, ok := redshift["retry_duration"].(int); ok {
		opts.DurationInSeconds = aws.Int64(int64(duration))
	}
	return opts
}
func extractCopyCommandConfiguration(redshift map[string]interface{}) *firehose.CopyCommand {
cmd := &firehose.CopyCommand{
DataTableName: aws.String(redshift["data_table_name"].(string)),

View File

@ -144,6 +144,7 @@ The `redshift_configuration` object supports the following:
* `cluster_jdbcurl` - (Required) The JDBC URL of the Redshift cluster.
* `username` - (Required) The username that the firehose delivery stream will assume. It is strongly recommended that the username and password provided is used exclusively for Amazon Kinesis Firehose purposes, and that the permissions for the account are restricted for Amazon Redshift INSERT permissions.
* `password` - (Required) The password for the username above.
* `retry_duration` - (Optional) The length of time, in seconds, during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Firehose does not retry if the value is 0 (zero) or if the first delivery attempt takes longer than the configured duration.
* `role_arn` - (Required) The ARN of the IAM role the stream assumes.
* `data_table_name` - (Required) The name of the table in the Redshift cluster that the S3 bucket will copy to.
* `copy_options` - (Optional) Copy options for copying the data from the S3 intermediate bucket into Redshift.