Merge pull request #19951 from hashicorp/s3-backend-nosuchbucket-enhanced-error
backend/s3: Configure AWS Client MaxRetries and provide enhanced S3 NoSuchBucket error message

commit 05691a978e
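
In short: the S3 backend gains a user-settable max_retries option that is passed through to the AWS client as MaxRetries, the hand-rolled retry loops in the state client are dropped in favor of the SDK's built-in retry handling, and a missing state bucket now produces a dedicated, actionable error message (errS3NoSuchBucket) instead of a bare AWS error.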
backend/s3/backend.go:

@@ -219,6 +219,13 @@ func New() backend.Backend {
 				Description: "Force s3 to use path style api.",
 				Default:     false,
 			},
+
+			"max_retries": {
+				Type:        schema.TypeInt,
+				Optional:    true,
+				Description: "The maximum number of times an AWS API request is retried on retryable failure.",
+				Default:     5,
+			},
 		},
 	}
 
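A note on the schema addition: because the field declares Default: 5, reading it back always yields a concrete int. A minimal sketch of the assumed helper/schema behavior (data is the *schema.ResourceData that configure receives, shown in the next hunk):

	// When max_retries is omitted from the backend config, data.Get returns
	// the declared Default, so the type assertion always sees an int.
	maxRetries := data.Get("max_retries").(int) // 5 unless the user overrides it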
backend/s3/backend.go:

@@ -285,6 +292,7 @@ func (b *Backend) configure(ctx context.Context) error {
 		SkipRequestingAccountId: data.Get("skip_requesting_account_id").(bool),
 		SkipMetadataApiCheck:    data.Get("skip_metadata_api_check").(bool),
 		S3ForcePathStyle:        data.Get("force_path_style").(bool),
+		MaxRetries:              data.Get("max_retries").(int),
 	}
 
 	client, err := cfg.Client()
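The terraform Config struct populated above ultimately feeds aws-sdk-go's aws.Config. A minimal standalone sketch of the SDK-level equivalent (the region and retry count here are illustrative values, not taken from this commit):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// MaxRetries bounds how many times the SDK replays a request that failed
	// with a retryable error (throttling, 5xx) before surfacing the error.
	sess := session.Must(session.NewSession(aws.NewConfig().
		WithRegion("us-east-1").
		WithMaxRetries(5)))

	svc := s3.New(sess)
	fmt.Println(svc.ClientInfo.ServiceName)
}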
backend/s3/backend_state.go:

@@ -7,6 +7,7 @@ import (
 	"strings"
 
 	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/service/s3"
 
 	"github.com/hashicorp/terraform/backend"
@@ -29,6 +30,9 @@ func (b *Backend) Workspaces() ([]string, error) {
 
 	resp, err := b.s3Client.ListObjects(params)
 	if err != nil {
+		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == s3.ErrCodeNoSuchBucket {
+			return nil, fmt.Errorf(errS3NoSuchBucket, err)
+		}
 		return nil, err
 	}
 
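The detection pattern used in Workspaces, pulled out for clarity (a sketch using the awserr and s3 imports added above; isNoSuchBucket is a hypothetical helper, not part of the commit):

// aws-sdk-go wraps service failures in awserr.Error; Code() returns the
// service's error code string, and the generated constant
// s3.ErrCodeNoSuchBucket equals "NoSuchBucket".
func isNoSuchBucket(err error) bool {
	awsErr, ok := err.(awserr.Error)
	return ok && awsErr.Code() == s3.ErrCodeNoSuchBucket
}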
backend/s3/client.go:

@@ -98,30 +98,21 @@ func (c *RemoteClient) get() (*remote.Payload, error) {
 	var output *s3.GetObjectOutput
 	var err error
 
-	// we immediately retry on an internal error, as those are usually transient
-	maxRetries := 2
-	for retryCount := 0; ; retryCount++ {
-		output, err = c.s3Client.GetObject(&s3.GetObjectInput{
-			Bucket: &c.bucketName,
-			Key:    &c.path,
-		})
+	output, err = c.s3Client.GetObject(&s3.GetObjectInput{
+		Bucket: &c.bucketName,
+		Key:    &c.path,
+	})
 
-		if err != nil {
-			if awserr, ok := err.(awserr.Error); ok {
-				switch awserr.Code() {
-				case s3.ErrCodeNoSuchKey:
-					return nil, nil
-				case s3ErrCodeInternalError:
-					if retryCount > maxRetries {
-						return nil, err
-					}
-					log.Println("[WARN] s3 internal error, retrying...")
-					continue
-				}
-			}
-			return nil, err
-		}
-		break
-	}
+	if err != nil {
+		if awserr, ok := err.(awserr.Error); ok {
+			switch awserr.Code() {
+			case s3.ErrCodeNoSuchBucket:
+				return nil, fmt.Errorf(errS3NoSuchBucket, err)
+			case s3.ErrCodeNoSuchKey:
+				return nil, nil
+			}
+		}
+		return nil, err
+	}
 
 	defer output.Body.Close()
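The removed loop special-cased S3's HTTP 500 "InternalError" responses. With MaxRetries configured on the client, the SDK's default retryer already covers that class of failure, which is what makes the deletion safe. A small illustration of that behavior (assumed, not from this commit):

package main

import (
	"fmt"
	"net/http"

	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/request"
)

func main() {
	retryer := client.DefaultRetryer{NumMaxRetries: 5}

	// A 500 response (S3's "InternalError") is considered retryable, so the
	// SDK retries it with exponential backoff up to NumMaxRetries times.
	r := &request.Request{HTTPResponse: &http.Response{StatusCode: 500}}
	fmt.Println(retryer.ShouldRetry(r)) // true
}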
backend/s3/client.go:

@@ -149,46 +140,32 @@ func (c *RemoteClient) Put(data []byte) error {
 	contentType := "application/json"
 	contentLength := int64(len(data))
 
-	// we immediately retry on an internal error, as those are usually transient
-	maxRetries := 2
-	for retryCount := 0; ; retryCount++ {
-		i := &s3.PutObjectInput{
-			ContentType:   &contentType,
-			ContentLength: &contentLength,
-			Body:          bytes.NewReader(data),
-			Bucket:        &c.bucketName,
-			Key:           &c.path,
-		}
+	i := &s3.PutObjectInput{
+		ContentType:   &contentType,
+		ContentLength: &contentLength,
+		Body:          bytes.NewReader(data),
+		Bucket:        &c.bucketName,
+		Key:           &c.path,
+	}
 
-		if c.serverSideEncryption {
-			if c.kmsKeyID != "" {
-				i.SSEKMSKeyId = &c.kmsKeyID
-				i.ServerSideEncryption = aws.String("aws:kms")
-			} else {
-				i.ServerSideEncryption = aws.String("AES256")
-			}
-		}
+	if c.serverSideEncryption {
+		if c.kmsKeyID != "" {
+			i.SSEKMSKeyId = &c.kmsKeyID
+			i.ServerSideEncryption = aws.String("aws:kms")
+		} else {
+			i.ServerSideEncryption = aws.String("AES256")
+		}
+	}
 
-		if c.acl != "" {
-			i.ACL = aws.String(c.acl)
-		}
+	if c.acl != "" {
+		i.ACL = aws.String(c.acl)
+	}
 
-		log.Printf("[DEBUG] Uploading remote state to S3: %#v", i)
+	log.Printf("[DEBUG] Uploading remote state to S3: %#v", i)
 
-		_, err := c.s3Client.PutObject(i)
-		if err != nil {
-			if awserr, ok := err.(awserr.Error); ok {
-				if awserr.Code() == s3ErrCodeInternalError {
-					if retryCount > maxRetries {
-						return fmt.Errorf("failed to upload state: %s", err)
-					}
-					log.Println("[WARN] s3 internal error, retrying...")
-					continue
-				}
-			}
-			return fmt.Errorf("failed to upload state: %s", err)
-		}
-		break
-	}
+	_, err := c.s3Client.PutObject(i)
+	if err != nil {
+		return fmt.Errorf("failed to upload state: %s", err)
+	}
 
 	sum := md5.Sum(data)
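The Put path gets the same simplification as get: the manual retry loop disappears, a single PutObject call remains, and any failure is wrapped once in "failed to upload state". The server-side-encryption and ACL handling is unchanged apart from the reduced indentation.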
backend/s3/client.go:

@@ -414,3 +391,12 @@ persists, and neither S3 nor DynamoDB are experiencing an outage, you may need
 to manually verify the remote state and update the Digest value stored in the
 DynamoDB table to the following value: %x
 `
+
+const errS3NoSuchBucket = `S3 bucket does not exist.
+
+The referenced S3 bucket must have been previously created. If the S3 bucket
+was created within the last minute, please wait for a minute or two and try
+again.
+
+Error: %s
+`
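Since errS3NoSuchBucket is a format string, the %s verb receives the original AWS error. A sketch of how the message renders (the constant is repeated locally so the example is self-contained, and the underlying error is fabricated with awserr.New for illustration):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/s3"
)

const errS3NoSuchBucket = `S3 bucket does not exist.

The referenced S3 bucket must have been previously created. If the S3 bucket
was created within the last minute, please wait for a minute or two and try
again.

Error: %s
`

func main() {
	underlying := awserr.New(s3.ErrCodeNoSuchBucket, "The specified bucket does not exist", nil)
	fmt.Println(fmt.Errorf(errS3NoSuchBucket, underlying))
	// The final line printed reads:
	// Error: NoSuchBucket: The specified bucket does not exist
}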
website/docs/backends/types/s3.html.md:

@@ -180,6 +180,7 @@ The following configuration options or environment variables are supported:
 * `skip_region_validation` - (Optional) Skip validation of provided region name.
 * `skip_requesting_account_id` - (Optional) Skip requesting the account ID.
 * `skip_metadata_api_check` - (Optional) Skip the AWS Metadata API check.
+* `max_retries` - (Optional) The maximum number of times an AWS API request is retried on retryable failure. Defaults to 5.
 
 ## Multi-account AWS Architecture
 
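From the user's side, the new option slots into the backend block like any other setting. A hypothetical configuration (bucket, key, region, and the value 10 are placeholders, not part of this commit):

terraform {
  backend "s3" {
    bucket = "my-state-bucket"          # placeholder bucket name
    key    = "global/terraform.tfstate"
    region = "us-east-1"

    # Retry throttled/5xx AWS API calls up to 10 times instead of the
    # default 5.
    max_retries = 10
  }
}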