Merge pull request #19951 from hashicorp/s3-backend-nosuchbucket-enhanced-error
backend/s3: Configure AWS Client MaxRetries and provide enhanced S3 NoSuchBucket error message
Commit: 05691a978e
@@ -219,6 +219,13 @@ func New() backend.Backend {
 				Description: "Force s3 to use path style api.",
 				Default:     false,
 			},
+
+			"max_retries": {
+				Type:        schema.TypeInt,
+				Optional:    true,
+				Description: "The maximum number of times an AWS API request is retried on retryable failure.",
+				Default:     5,
+			},
 		},
 	}
 
@@ -285,6 +292,7 @@ func (b *Backend) configure(ctx context.Context) error {
 		SkipRequestingAccountId: data.Get("skip_requesting_account_id").(bool),
 		SkipMetadataApiCheck:    data.Get("skip_metadata_api_check").(bool),
 		S3ForcePathStyle:        data.Get("force_path_style").(bool),
+		MaxRetries:              data.Get("max_retries").(int),
 	}
 
 	client, err := cfg.Client()
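
Note: MaxRetries feeds the AWS SDK's built-in retryer rather than any backend-specific retry loop. A minimal standalone sketch of the same wiring, assuming aws-sdk-go v1 (the region value is hypothetical):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// MaxRetries caps how many times the SDK's default retryer re-issues a
	// request that failed with a retryable error (throttling, 5xx responses,
	// connection resets) before surfacing the error to the caller.
	sess, err := session.NewSession(&aws.Config{
		Region:     aws.String("us-east-1"), // hypothetical region
		MaxRetries: aws.Int(5),              // mirrors the backend's default of 5
	})
	if err != nil {
		panic(err)
	}

	// Every request made through this client inherits the retry budget.
	client := s3.New(sess)
	fmt.Println(client.ClientInfo.ServiceName)
}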
@@ -7,6 +7,7 @@ import (
 	"strings"
 
 	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/service/s3"
 
 	"github.com/hashicorp/terraform/backend"
@@ -29,6 +30,9 @@ func (b *Backend) Workspaces() ([]string, error) {
 
 	resp, err := b.s3Client.ListObjects(params)
 	if err != nil {
+		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == s3.ErrCodeNoSuchBucket {
+			return nil, fmt.Errorf(errS3NoSuchBucket, err)
+		}
 		return nil, err
 	}
 
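
Note: the type assertion on awserr.Error is the standard aws-sdk-go v1 idiom for classifying service errors by code. A self-contained sketch of the same pattern; isNoSuchBucket is a hypothetical helper, not part of this change:

package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/s3"
)

// isNoSuchBucket reports whether err is an AWS service error whose code
// matches s3.ErrCodeNoSuchBucket, using the same assertion as Workspaces.
func isNoSuchBucket(err error) bool {
	if awsErr, ok := err.(awserr.Error); ok {
		return awsErr.Code() == s3.ErrCodeNoSuchBucket
	}
	return false
}

func main() {
	// awserr.New constructs a synthetic service error for demonstration.
	err := awserr.New(s3.ErrCodeNoSuchBucket, "The specified bucket does not exist", errors.New("original"))
	fmt.Println(isNoSuchBucket(err)) // true
}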
@@ -98,30 +98,21 @@ func (c *RemoteClient) get() (*remote.Payload, error) {
 	var output *s3.GetObjectOutput
 	var err error
 
-	// we immediately retry on an internal error, as those are usually transient
-	maxRetries := 2
-	for retryCount := 0; ; retryCount++ {
-		output, err = c.s3Client.GetObject(&s3.GetObjectInput{
-			Bucket: &c.bucketName,
-			Key:    &c.path,
-		})
-		if err != nil {
-			if awserr, ok := err.(awserr.Error); ok {
-				switch awserr.Code() {
-				case s3.ErrCodeNoSuchKey:
-					return nil, nil
-				case s3ErrCodeInternalError:
-					if retryCount > maxRetries {
-						return nil, err
-					}
-					log.Println("[WARN] s3 internal error, retrying...")
-					continue
-				}
-			}
-			return nil, err
-		}
-		break
-	}
+	output, err = c.s3Client.GetObject(&s3.GetObjectInput{
+		Bucket: &c.bucketName,
+		Key:    &c.path,
+	})
+	if err != nil {
+		if awserr, ok := err.(awserr.Error); ok {
+			switch awserr.Code() {
+			case s3.ErrCodeNoSuchBucket:
+				return nil, fmt.Errorf(errS3NoSuchBucket, err)
+			case s3.ErrCodeNoSuchKey:
+				return nil, nil
+			}
+		}
+		return nil, err
+	}
 
 	defer output.Body.Close()
 
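
Note: the deleted loop retried only s3ErrCodeInternalError, with a hard-coded budget of two extra attempts. With max_retries now plumbed into the client configuration, the SDK's default retryer already re-issues requests that fail transiently, which is what makes the hand-rolled loop redundant. A sketch of the equivalent per-client override, assuming aws-sdk-go v1:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())

	// The extra config argument overrides settings for this client only;
	// with MaxRetries set, GetObject and PutObject transparently retry
	// retryable failures with exponential backoff before returning an error.
	_ = s3.New(sess, aws.NewConfig().WithMaxRetries(5))
}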
@@ -149,46 +140,32 @@ func (c *RemoteClient) Put(data []byte) error {
 	contentType := "application/json"
 	contentLength := int64(len(data))
 
-	// we immediately retry on an internal error, as those are usually transient
-	maxRetries := 2
-	for retryCount := 0; ; retryCount++ {
-		i := &s3.PutObjectInput{
-			ContentType:   &contentType,
-			ContentLength: &contentLength,
-			Body:          bytes.NewReader(data),
-			Bucket:        &c.bucketName,
-			Key:           &c.path,
-		}
-
-		if c.serverSideEncryption {
-			if c.kmsKeyID != "" {
-				i.SSEKMSKeyId = &c.kmsKeyID
-				i.ServerSideEncryption = aws.String("aws:kms")
-			} else {
-				i.ServerSideEncryption = aws.String("AES256")
-			}
-		}
-
-		if c.acl != "" {
-			i.ACL = aws.String(c.acl)
-		}
-
-		log.Printf("[DEBUG] Uploading remote state to S3: %#v", i)
-
-		_, err := c.s3Client.PutObject(i)
-		if err != nil {
-			if awserr, ok := err.(awserr.Error); ok {
-				if awserr.Code() == s3ErrCodeInternalError {
-					if retryCount > maxRetries {
-						return fmt.Errorf("failed to upload state: %s", err)
-					}
-					log.Println("[WARN] s3 internal error, retrying...")
-					continue
-				}
-			}
-			return fmt.Errorf("failed to upload state: %s", err)
-		}
-		break
-	}
+	i := &s3.PutObjectInput{
+		ContentType:   &contentType,
+		ContentLength: &contentLength,
+		Body:          bytes.NewReader(data),
+		Bucket:        &c.bucketName,
+		Key:           &c.path,
+	}
+
+	if c.serverSideEncryption {
+		if c.kmsKeyID != "" {
+			i.SSEKMSKeyId = &c.kmsKeyID
+			i.ServerSideEncryption = aws.String("aws:kms")
+		} else {
+			i.ServerSideEncryption = aws.String("AES256")
+		}
+	}
+
+	if c.acl != "" {
+		i.ACL = aws.String(c.acl)
+	}
+
+	log.Printf("[DEBUG] Uploading remote state to S3: %#v", i)
+
+	_, err := c.s3Client.PutObject(i)
+	if err != nil {
+		return fmt.Errorf("failed to upload state: %s", err)
+	}
 
 	sum := md5.Sum(data)
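
Note: after a successful PutObject, Put goes on to compute an MD5 digest of the state (the sum := md5.Sum(data) context line), which pairs with the Digest value stored in DynamoDB that the error text in the next hunk refers to. A minimal sketch of that digest computation; the sample payload is hypothetical:

package main

import (
	"crypto/md5"
	"fmt"
)

func main() {
	// state stands in for a serialized Terraform state payload.
	state := []byte(`{"version": 4}`)

	// md5.Sum returns a [16]byte array; hex-encoding it with %x yields the
	// digest format referenced by the DynamoDB consistency-check message.
	sum := md5.Sum(state)
	fmt.Printf("%x\n", sum)
}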
@@ -414,3 +391,12 @@ persists, and neither S3 nor DynamoDB are experiencing an outage, you may need
 to manually verify the remote state and update the Digest value stored in the
 DynamoDB table to the following value: %x
 `
+
+const errS3NoSuchBucket = `S3 bucket does not exist.
+
+The referenced S3 bucket must have been previously created. If the S3 bucket
+was created within the last minute, please wait for a minute or two and try
+again.
+
+Error: %s
+`
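
Note: errS3NoSuchBucket is a plain format string, so call sites wrap the underlying AWS error with fmt.Errorf(errS3NoSuchBucket, err). A runnable sketch that redefines the constant for self-containment; the underlying error text is illustrative:

package main

import (
	"errors"
	"fmt"
)

// errS3NoSuchBucket mirrors the template added above: a multi-line message
// with a single %s verb for the underlying AWS error.
const errS3NoSuchBucket = `S3 bucket does not exist.

The referenced S3 bucket must have been previously created. If the S3 bucket
was created within the last minute, please wait for a minute or two and try
again.

Error: %s
`

func main() {
	underlying := errors.New("NoSuchBucket: The specified bucket does not exist")
	fmt.Println(fmt.Errorf(errS3NoSuchBucket, underlying))
}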
@@ -180,6 +180,7 @@ The following configuration options or environment variables are supported:
 * `skip_region_validation` - (Optional) Skip validation of provided region name.
 * `skip_requesting_account_id` - (Optional) Skip requesting the account ID.
 * `skip_metadata_api_check` - (Optional) Skip the AWS Metadata API check.
+* `max_retries` - (Optional) The maximum number of times an AWS API request is retried on retryable failure. Defaults to 5.
 
 ## Multi-account AWS Architecture
 