backend/s3: Configure AWS Client MaxRetries and provide enhanced S3 NoSuchBucket error message
The AWS Go SDK automatically provides a default request retryer with exponential backoff; it is invoked by setting `MaxRetries`, and leaving that value `nil` defaults it to 3. The terraform-aws-provider `config.Client()` sets `MaxRetries` to 0 unless it is explicitly configured above 0. Previously we were not setting this configuration, so the default request retryer was never invoked.

The default retryer already handles HTTP error codes above 500, including S3's InternalError response, so the hand-rolled retry handling can be removed. Enabling it also automatically retries many additional cases, such as temporary networking issues and other retryable AWS service responses.

Changes:

* s3/backend: Add `max_retries` argument
* s3/backend: Enhance S3 NoSuchBucket error to include additional information
parent e997373f44
commit ed37d07632
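Aside (not part of the commit): a minimal sketch of how `MaxRetries` reaches an AWS SDK for Go (v1) client through the standard session plumbing; the region value here is illustrative.

    package main

    import (
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
    	// aws.Config.MaxRetries is a *int; leaving it nil selects the
    	// service default (3 for most services). Setting it enables the
    	// SDK's built-in retryer with exponential backoff.
    	sess, err := session.NewSession(&aws.Config{
    		Region:     aws.String("us-east-1"), // illustrative region
    		MaxRetries: aws.Int(5),
    	})
    	if err != nil {
    		log.Fatal(err)
    	}

    	// Every API call made through this client now retries retryable
    	// failures (HTTP 5xx, throttling, transient network errors) up to
    	// five times before surfacing the error.
    	svc := s3.New(sess)
    	_ = svc
    }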
@@ -219,6 +219,13 @@ func New() backend.Backend {
 				Description: "Force s3 to use path style api.",
 				Default:     false,
 			},
+
+			"max_retries": {
+				Type:        schema.TypeInt,
+				Optional:    true,
+				Description: "The maximum number of times an AWS API request is retried on retryable failure.",
+				Default:     5,
+			},
 		},
 	}
 
@@ -285,6 +292,7 @@ func (b *Backend) configure(ctx context.Context) error {
 		SkipRequestingAccountId: data.Get("skip_requesting_account_id").(bool),
 		SkipMetadataApiCheck:    data.Get("skip_metadata_api_check").(bool),
 		S3ForcePathStyle:        data.Get("force_path_style").(bool),
+		MaxRetries:              data.Get("max_retries").(int),
 	}
 
 	client, err := cfg.Client()
@@ -7,6 +7,7 @@ import (
 	"strings"
 
 	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/service/s3"
 
 	"github.com/hashicorp/terraform/backend"
@@ -29,6 +30,9 @@ func (b *Backend) Workspaces() ([]string, error) {
 
 	resp, err := b.s3Client.ListObjects(params)
 	if err != nil {
+		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == s3.ErrCodeNoSuchBucket {
+			return nil, fmt.Errorf(errS3NoSuchBucket, err)
+		}
 		return nil, err
 	}
 
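The `awsErr` assertion above is the usual aws-sdk-go v1 idiom for classifying service errors by code. A self-contained sketch of the same idiom, with a synthetic error standing in for a real S3 response:

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws/awserr"
    	"github.com/aws/aws-sdk-go/service/s3"
    )

    // classifyS3Error is an illustrative helper, not part of the commit.
    // awserr.Error exposes the service's error code, which is matched
    // against the SDK's generated constants such as s3.ErrCodeNoSuchBucket.
    func classifyS3Error(err error) string {
    	if awsErr, ok := err.(awserr.Error); ok {
    		switch awsErr.Code() {
    		case s3.ErrCodeNoSuchBucket:
    			return "bucket missing: " + awsErr.Message()
    		case s3.ErrCodeNoSuchKey:
    			return "key missing: " + awsErr.Message()
    		}
    	}
    	return fmt.Sprintf("unclassified error: %v", err)
    }

    func main() {
    	// awserr.New builds a synthetic error for demonstration purposes.
    	err := awserr.New(s3.ErrCodeNoSuchBucket, "The specified bucket does not exist", nil)
    	fmt.Println(classifyS3Error(err))
    }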
@@ -98,9 +98,6 @@ func (c *RemoteClient) get() (*remote.Payload, error) {
 	var output *s3.GetObjectOutput
 	var err error
 
-	// we immediately retry on an internal error, as those are usually transient
-	maxRetries := 2
-	for retryCount := 0; ; retryCount++ {
 	output, err = c.s3Client.GetObject(&s3.GetObjectInput{
 		Bucket: &c.bucketName,
 		Key:    &c.path,
@@ -109,20 +106,14 @@ func (c *RemoteClient) get() (*remote.Payload, error) {
 	})
 	if err != nil {
 		if awserr, ok := err.(awserr.Error); ok {
 			switch awserr.Code() {
+			case s3.ErrCodeNoSuchBucket:
+				return nil, fmt.Errorf(errS3NoSuchBucket, err)
 			case s3.ErrCodeNoSuchKey:
 				return nil, nil
-			case s3ErrCodeInternalError:
-				if retryCount > maxRetries {
-					return nil, err
-				}
-				log.Println("[WARN] s3 internal error, retrying...")
-				continue
 			}
 		}
 		return nil, err
 	}
-		break
-	}
 
 	defer output.Body.Close()
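With the hand-rolled loop gone, retries happen inside the SDK. A hedged sketch of how the old per-attempt warning could be recreated with a retry handler, assuming aws-sdk-go v1's request handler lists:

    package main

    import (
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/request"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
    	sess := session.Must(session.NewSession(&aws.Config{
    		MaxRetries: aws.Int(5),
    	}))
    	svc := s3.New(sess)

    	// The Retry handler list runs each time the SDK decides to retry a
    	// request, which is where a log line like the removed
    	// "[WARN] s3 internal error, retrying..." can be reinstated.
    	svc.Handlers.Retry.PushBack(func(r *request.Request) {
    		log.Printf("[WARN] retrying %s.%s, attempt %d: %v",
    			r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount+1, r.Error)
    	})
    	_ = svc
    }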
@@ -149,9 +140,6 @@ func (c *RemoteClient) Put(data []byte) error {
 	contentType := "application/json"
 	contentLength := int64(len(data))
 
-	// we immediately retry on an internal error, as those are usually transient
-	maxRetries := 2
-	for retryCount := 0; ; retryCount++ {
 	i := &s3.PutObjectInput{
 		ContentType:   &contentType,
 		ContentLength: &contentLength,
@@ -177,19 +165,8 @@ func (c *RemoteClient) Put(data []byte) error {
 
 	_, err := c.s3Client.PutObject(i)
 	if err != nil {
-		if awserr, ok := err.(awserr.Error); ok {
-			if awserr.Code() == s3ErrCodeInternalError {
-				if retryCount > maxRetries {
-					return fmt.Errorf("failed to upload state: %s", err)
-				}
-				log.Println("[WARN] s3 internal error, retrying...")
-				continue
-			}
-		}
 		return fmt.Errorf("failed to upload state: %s", err)
 	}
-		break
-	}
 
 	sum := md5.Sum(data)
 	if err := c.putMD5(sum[:]); err != nil {
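The same loop removal applies to Put. If finer control than a plain attempt count is ever needed, the SDK also accepts a custom retryer; a sketch using aws-sdk-go v1's client.DefaultRetryer (the count of 10 is arbitrary):

    package main

    import (
    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/client"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
    	// client.DefaultRetryer implements request.Retryer; embedding it
    	// and overriding methods such as ShouldRetry or RetryRules is the
    	// usual way to customize behavior. Here only the attempt count is
    	// raised above the default.
    	cfg := &aws.Config{
    		Retryer: client.DefaultRetryer{NumMaxRetries: 10},
    	}
    	sess := session.Must(session.NewSession(cfg))
    	svc := s3.New(sess)
    	_ = svc
    }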
@@ -414,3 +391,12 @@ persists, and neither S3 nor DynamoDB are experiencing an outage, you may need
 to manually verify the remote state and update the Digest value stored in the
 DynamoDB table to the following value: %x
 `
+
+const errS3NoSuchBucket = `S3 bucket does not exist.
+
+The referenced S3 bucket must have been previously created. If the S3 bucket
+was created within the last minute, please wait for a minute or two and try
+again.
+
+Error: %s
+`
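The new constant is a printf-style template; Workspaces and get pass the underlying SDK error as its `%s` argument. A runnable illustration, with a synthetic error standing in for the real cause:

    package main

    import (
    	"errors"
    	"fmt"
    )

    const errS3NoSuchBucket = `S3 bucket does not exist.

    The referenced S3 bucket must have been previously created. If the S3 bucket
    was created within the last minute, please wait for a minute or two and try
    again.

    Error: %s
    `

    func main() {
    	// A stand-in for the error the SDK would return.
    	cause := errors.New("NoSuchBucket: The specified bucket does not exist")
    	fmt.Println(fmt.Errorf(errS3NoSuchBucket, cause))
    }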
@@ -180,6 +180,7 @@ The following configuration options or environment variables are supported:
 * `skip_region_validation` - (Optional) Skip validation of provided region name.
 * `skip_requesting_account_id` - (Optional) Skip requesting the account ID.
 * `skip_metadata_api_check` - (Optional) Skip the AWS Metadata API check.
+* `max_retries` - (Optional) The maximum number of times an AWS API request is retried on retryable failure. Defaults to 5.
 
 ## Multi-account AWS Architecture
 