2017-03-21 18:43:31 +01:00
|
|
|
package s3
|
2015-04-30 18:21:49 +02:00
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
2017-05-19 17:51:46 +02:00
|
|
|
"crypto/md5"
|
2019-07-05 17:54:07 +02:00
|
|
|
"encoding/base64"
|
2017-05-19 17:51:46 +02:00
|
|
|
"encoding/hex"
|
2017-02-07 17:12:02 +01:00
|
|
|
"encoding/json"
|
2017-05-19 17:51:46 +02:00
|
|
|
"errors"
|
2015-04-30 18:21:49 +02:00
|
|
|
"fmt"
|
|
|
|
"io"
|
2015-09-14 11:38:29 +02:00
|
|
|
"log"
|
2017-05-19 17:51:46 +02:00
|
|
|
"time"
|
2015-04-30 18:21:49 +02:00
|
|
|
|
2015-06-03 20:36:57 +02:00
|
|
|
"github.com/aws/aws-sdk-go/aws"
|
|
|
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
2017-01-12 22:55:42 +01:00
|
|
|
"github.com/aws/aws-sdk-go/service/dynamodb"
|
2015-06-03 20:36:57 +02:00
|
|
|
"github.com/aws/aws-sdk-go/service/s3"
|
2017-03-21 18:43:31 +01:00
|
|
|
multierror "github.com/hashicorp/go-multierror"
|
2017-02-15 00:00:59 +01:00
|
|
|
uuid "github.com/hashicorp/go-uuid"
|
2021-05-17 21:43:35 +02:00
|
|
|
"github.com/hashicorp/terraform/internal/states/remote"
|
|
|
|
"github.com/hashicorp/terraform/internal/states/statemgr"
|
2015-04-30 18:21:49 +02:00
|
|
|
)
|
|
|
|
|
2017-05-19 17:51:46 +02:00
|
|
|
// Store the last saved serial in dynamo with this suffix for consistency checks.
const (
	// s3EncryptionAlgorithm is the SSE algorithm name sent to S3, both for
	// default server-side encryption and for customer-supplied (SSE-C) keys.
	s3EncryptionAlgorithm = "AES256"

	// stateIDSuffix is appended to the lock path to form the DynamoDB key
	// under which the MD5 digest of the last saved state is stored.
	stateIDSuffix = "-md5"

	// s3ErrCodeInternalError is the AWS error code for transient S3 internal
	// errors; presumably referenced by retry logic elsewhere in this package
	// (not used in this file).
	s3ErrCodeInternalError = "InternalError"
)
|
2017-05-19 17:51:46 +02:00
|
|
|
|
2017-03-22 20:52:55 +01:00
|
|
|
// RemoteClient reads and writes Terraform state to an S3 object, optionally
// using a DynamoDB table for locking and state-digest consistency checks.
type RemoteClient struct {
	s3Client  *s3.S3
	dynClient *dynamodb.DynamoDB

	// bucketName and path identify the S3 object that holds the state.
	bucketName string
	path       string

	// serverSideEncryption enables SSE on uploads; customerEncryptionKey,
	// when non-nil, selects SSE-C with this raw key material.
	serverSideEncryption  bool
	customerEncryptionKey []byte

	// acl, when non-empty, is applied as the canned ACL on uploads.
	acl string

	// kmsKeyID, when non-empty, selects SSE-KMS with this key.
	kmsKeyID string

	// ddbTable is the DynamoDB table used for locks and digests; locking
	// and consistency checks are disabled when empty.
	ddbTable string
}
|
|
|
|
|
2017-05-19 17:51:46 +02:00
|
|
|
var (
	// The amount of time we will retry a state waiting for it to match the
	// expected checksum.
	consistencyRetryTimeout = 10 * time.Second

	// delay between polls of the state while waiting for consistency
	consistencyRetryPollInterval = 2 * time.Second
)
|
|
|
|
|
|
|
|
// testChecksumHook is a test hook invoked each time a fetched state's
// checksum does not match the digest recorded in DynamoDB.
var testChecksumHook func()
|
|
|
|
|
|
|
|
func (c *RemoteClient) Get() (payload *remote.Payload, err error) {
|
|
|
|
deadline := time.Now().Add(consistencyRetryTimeout)
|
|
|
|
|
|
|
|
// If we have a checksum, and the returned payload doesn't match, we retry
|
|
|
|
// up until deadline.
|
|
|
|
for {
|
|
|
|
payload, err = c.get()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2017-05-30 19:59:16 +02:00
|
|
|
// If the remote state was manually removed the payload will be nil,
|
|
|
|
// but if there's still a digest entry for that state we will still try
|
|
|
|
// to compare the MD5 below.
|
|
|
|
var digest []byte
|
|
|
|
if payload != nil {
|
|
|
|
digest = payload.MD5
|
|
|
|
}
|
|
|
|
|
2017-05-19 17:51:46 +02:00
|
|
|
// verify that this state is what we expect
|
|
|
|
if expected, err := c.getMD5(); err != nil {
|
2018-01-17 03:05:26 +01:00
|
|
|
log.Printf("[WARN] failed to fetch state md5: %s", err)
|
2017-05-30 19:59:16 +02:00
|
|
|
} else if len(expected) > 0 && !bytes.Equal(expected, digest) {
|
2018-01-17 03:05:26 +01:00
|
|
|
log.Printf("[WARN] state md5 mismatch: expected '%x', got '%x'", expected, digest)
|
2017-05-19 17:51:46 +02:00
|
|
|
|
|
|
|
if testChecksumHook != nil {
|
|
|
|
testChecksumHook()
|
|
|
|
}
|
|
|
|
|
|
|
|
if time.Now().Before(deadline) {
|
|
|
|
time.Sleep(consistencyRetryPollInterval)
|
|
|
|
log.Println("[INFO] retrying S3 RemoteClient.Get...")
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2017-05-30 19:59:16 +02:00
|
|
|
return nil, fmt.Errorf(errBadChecksumFmt, digest)
|
2017-05-19 17:51:46 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
return payload, err
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *RemoteClient) get() (*remote.Payload, error) {
|
2017-10-01 14:22:04 +02:00
|
|
|
var output *s3.GetObjectOutput
|
|
|
|
var err error
|
|
|
|
|
2019-07-05 17:54:07 +02:00
|
|
|
input := &s3.GetObjectInput{
|
2019-01-09 19:01:37 +01:00
|
|
|
Bucket: &c.bucketName,
|
|
|
|
Key: &c.path,
|
2019-07-05 17:54:07 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if c.serverSideEncryption && c.customerEncryptionKey != nil {
|
|
|
|
input.SetSSECustomerKey(string(c.customerEncryptionKey))
|
|
|
|
input.SetSSECustomerAlgorithm(s3EncryptionAlgorithm)
|
|
|
|
input.SetSSECustomerKeyMD5(c.getSSECustomerKeyMD5())
|
|
|
|
}
|
|
|
|
|
|
|
|
output, err = c.s3Client.GetObject(input)
|
2017-10-01 14:22:04 +02:00
|
|
|
|
2019-01-09 19:01:37 +01:00
|
|
|
if err != nil {
|
|
|
|
if awserr, ok := err.(awserr.Error); ok {
|
|
|
|
switch awserr.Code() {
|
|
|
|
case s3.ErrCodeNoSuchBucket:
|
|
|
|
return nil, fmt.Errorf(errS3NoSuchBucket, err)
|
|
|
|
case s3.ErrCodeNoSuchKey:
|
|
|
|
return nil, nil
|
2015-04-30 18:21:49 +02:00
|
|
|
}
|
|
|
|
}
|
2019-01-09 19:01:37 +01:00
|
|
|
return nil, err
|
2015-04-30 18:21:49 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
defer output.Body.Close()
|
|
|
|
|
|
|
|
buf := bytes.NewBuffer(nil)
|
|
|
|
if _, err := io.Copy(buf, output.Body); err != nil {
|
|
|
|
return nil, fmt.Errorf("Failed to read remote state: %s", err)
|
|
|
|
}
|
|
|
|
|
2017-05-19 17:51:46 +02:00
|
|
|
sum := md5.Sum(buf.Bytes())
|
2017-03-21 18:43:31 +01:00
|
|
|
payload := &remote.Payload{
|
2015-04-30 18:21:49 +02:00
|
|
|
Data: buf.Bytes(),
|
2017-05-19 17:51:46 +02:00
|
|
|
MD5: sum[:],
|
2015-04-30 18:21:49 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// If there was no data, then return nil
|
|
|
|
if len(payload.Data) == 0 {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return payload, nil
|
|
|
|
}
|
|
|
|
|
2017-03-22 20:52:55 +01:00
|
|
|
func (c *RemoteClient) Put(data []byte) error {
|
2015-10-02 00:14:55 +02:00
|
|
|
contentType := "application/json"
|
2015-04-30 18:21:49 +02:00
|
|
|
contentLength := int64(len(data))
|
|
|
|
|
2019-01-09 19:01:37 +01:00
|
|
|
i := &s3.PutObjectInput{
|
|
|
|
ContentType: &contentType,
|
|
|
|
ContentLength: &contentLength,
|
|
|
|
Body: bytes.NewReader(data),
|
|
|
|
Bucket: &c.bucketName,
|
|
|
|
Key: &c.path,
|
|
|
|
}
|
2015-06-19 20:33:03 +02:00
|
|
|
|
2019-01-09 19:01:37 +01:00
|
|
|
if c.serverSideEncryption {
|
|
|
|
if c.kmsKeyID != "" {
|
|
|
|
i.SSEKMSKeyId = &c.kmsKeyID
|
|
|
|
i.ServerSideEncryption = aws.String("aws:kms")
|
2019-07-05 17:54:07 +02:00
|
|
|
} else if c.customerEncryptionKey != nil {
|
|
|
|
i.SetSSECustomerKey(string(c.customerEncryptionKey))
|
|
|
|
i.SetSSECustomerAlgorithm(s3EncryptionAlgorithm)
|
|
|
|
i.SetSSECustomerKeyMD5(c.getSSECustomerKeyMD5())
|
2019-01-09 19:01:37 +01:00
|
|
|
} else {
|
2019-07-05 17:54:07 +02:00
|
|
|
i.ServerSideEncryption = aws.String(s3EncryptionAlgorithm)
|
2015-07-31 09:09:28 +02:00
|
|
|
}
|
2019-01-09 19:01:37 +01:00
|
|
|
}
|
2015-06-19 20:33:03 +02:00
|
|
|
|
2019-01-09 19:01:37 +01:00
|
|
|
if c.acl != "" {
|
|
|
|
i.ACL = aws.String(c.acl)
|
|
|
|
}
|
2015-09-14 11:38:29 +02:00
|
|
|
|
2019-01-09 19:01:37 +01:00
|
|
|
log.Printf("[DEBUG] Uploading remote state to S3: %#v", i)
|
2015-09-14 11:38:29 +02:00
|
|
|
|
2019-01-09 19:01:37 +01:00
|
|
|
_, err := c.s3Client.PutObject(i)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("failed to upload state: %s", err)
|
2015-04-30 18:21:49 +02:00
|
|
|
}
|
2017-05-19 17:51:46 +02:00
|
|
|
|
|
|
|
sum := md5.Sum(data)
|
|
|
|
if err := c.putMD5(sum[:]); err != nil {
|
|
|
|
// if this errors out, we unfortunately have to error out altogether,
|
|
|
|
// since the next Get will inevitably fail.
|
|
|
|
return fmt.Errorf("failed to store state MD5: %s", err)
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
2015-04-30 18:21:49 +02:00
|
|
|
}
|
|
|
|
|
2017-03-22 20:52:55 +01:00
|
|
|
func (c *RemoteClient) Delete() error {
|
|
|
|
_, err := c.s3Client.DeleteObject(&s3.DeleteObjectInput{
|
2015-04-30 18:21:49 +02:00
|
|
|
Bucket: &c.bucketName,
|
2017-03-22 20:52:55 +01:00
|
|
|
Key: &c.path,
|
2015-04-30 18:21:49 +02:00
|
|
|
})
|
|
|
|
|
2017-05-19 17:51:46 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := c.deleteMD5(); err != nil {
|
|
|
|
log.Printf("error deleting state md5: %s", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
2015-04-30 18:21:49 +02:00
|
|
|
}
|
2017-01-12 22:55:42 +01:00
|
|
|
|
2020-08-11 17:43:01 +02:00
|
|
|
func (c *RemoteClient) Lock(info *statemgr.LockInfo) (string, error) {
|
2017-05-26 01:12:20 +02:00
|
|
|
if c.ddbTable == "" {
|
2017-02-15 00:00:59 +01:00
|
|
|
return "", nil
|
2017-01-12 22:55:42 +01:00
|
|
|
}
|
|
|
|
|
2017-03-30 19:36:54 +02:00
|
|
|
info.Path = c.lockPath()
|
2017-02-15 00:00:59 +01:00
|
|
|
|
2017-02-15 16:25:04 +01:00
|
|
|
if info.ID == "" {
|
|
|
|
lockID, err := uuid.GenerateUUID()
|
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
2017-01-12 22:55:42 +01:00
|
|
|
|
2017-02-15 16:25:04 +01:00
|
|
|
info.ID = lockID
|
|
|
|
}
|
2017-02-15 00:00:59 +01:00
|
|
|
|
2017-01-12 22:55:42 +01:00
|
|
|
putParams := &dynamodb.PutItemInput{
|
|
|
|
Item: map[string]*dynamodb.AttributeValue{
|
2017-03-30 19:36:54 +02:00
|
|
|
"LockID": {S: aws.String(c.lockPath())},
|
2017-02-15 20:01:18 +01:00
|
|
|
"Info": {S: aws.String(string(info.Marshal()))},
|
2017-01-12 22:55:42 +01:00
|
|
|
},
|
2017-05-26 01:12:20 +02:00
|
|
|
TableName: aws.String(c.ddbTable),
|
2017-01-12 22:55:42 +01:00
|
|
|
ConditionExpression: aws.String("attribute_not_exists(LockID)"),
|
|
|
|
}
|
2017-02-15 16:25:04 +01:00
|
|
|
_, err := c.dynClient.PutItem(putParams)
|
2017-01-12 22:55:42 +01:00
|
|
|
|
|
|
|
if err != nil {
|
2017-02-15 20:01:18 +01:00
|
|
|
lockInfo, infoErr := c.getLockInfo()
|
|
|
|
if infoErr != nil {
|
|
|
|
err = multierror.Append(err, infoErr)
|
2017-02-07 17:12:02 +01:00
|
|
|
}
|
2017-01-12 22:55:42 +01:00
|
|
|
|
2020-08-11 17:43:01 +02:00
|
|
|
lockErr := &statemgr.LockError{
|
2017-02-15 20:01:18 +01:00
|
|
|
Err: err,
|
|
|
|
Info: lockInfo,
|
|
|
|
}
|
|
|
|
return "", lockErr
|
2017-01-12 22:55:42 +01:00
|
|
|
}
|
2017-05-19 17:51:46 +02:00
|
|
|
|
2017-02-15 16:25:04 +01:00
|
|
|
return info.ID, nil
|
2017-01-12 22:55:42 +01:00
|
|
|
}
|
|
|
|
|
2017-05-19 17:51:46 +02:00
|
|
|
func (c *RemoteClient) getMD5() ([]byte, error) {
|
2017-05-26 01:12:20 +02:00
|
|
|
if c.ddbTable == "" {
|
2017-05-19 17:51:46 +02:00
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
getParams := &dynamodb.GetItemInput{
|
|
|
|
Key: map[string]*dynamodb.AttributeValue{
|
|
|
|
"LockID": {S: aws.String(c.lockPath() + stateIDSuffix)},
|
|
|
|
},
|
|
|
|
ProjectionExpression: aws.String("LockID, Digest"),
|
2017-05-26 01:12:20 +02:00
|
|
|
TableName: aws.String(c.ddbTable),
|
2017-10-27 22:43:30 +02:00
|
|
|
ConsistentRead: aws.Bool(true),
|
2017-05-19 17:51:46 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
resp, err := c.dynClient.GetItem(getParams)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
var val string
|
|
|
|
if v, ok := resp.Item["Digest"]; ok && v.S != nil {
|
|
|
|
val = *v.S
|
|
|
|
}
|
|
|
|
|
|
|
|
sum, err := hex.DecodeString(val)
|
|
|
|
if err != nil || len(sum) != md5.Size {
|
|
|
|
return nil, errors.New("invalid md5")
|
|
|
|
}
|
|
|
|
|
|
|
|
return sum, nil
|
|
|
|
}
|
|
|
|
|
2019-06-15 15:20:37 +02:00
|
|
|
// store the hash of the state so that clients can check for stale state files.
|
2017-05-19 17:51:46 +02:00
|
|
|
func (c *RemoteClient) putMD5(sum []byte) error {
|
2017-05-26 01:12:20 +02:00
|
|
|
if c.ddbTable == "" {
|
2017-05-19 17:51:46 +02:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(sum) != md5.Size {
|
|
|
|
return errors.New("invalid payload md5")
|
|
|
|
}
|
|
|
|
|
|
|
|
putParams := &dynamodb.PutItemInput{
|
|
|
|
Item: map[string]*dynamodb.AttributeValue{
|
|
|
|
"LockID": {S: aws.String(c.lockPath() + stateIDSuffix)},
|
|
|
|
"Digest": {S: aws.String(hex.EncodeToString(sum))},
|
|
|
|
},
|
2017-05-26 01:12:20 +02:00
|
|
|
TableName: aws.String(c.ddbTable),
|
2017-05-19 17:51:46 +02:00
|
|
|
}
|
|
|
|
_, err := c.dynClient.PutItem(putParams)
|
|
|
|
if err != nil {
|
2018-01-17 03:05:26 +01:00
|
|
|
log.Printf("[WARN] failed to record state serial in dynamodb: %s", err)
|
2017-05-19 17:51:46 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// remove the hash value for a deleted state
|
|
|
|
func (c *RemoteClient) deleteMD5() error {
|
2017-05-26 01:12:20 +02:00
|
|
|
if c.ddbTable == "" {
|
2017-05-19 17:51:46 +02:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
params := &dynamodb.DeleteItemInput{
|
|
|
|
Key: map[string]*dynamodb.AttributeValue{
|
|
|
|
"LockID": {S: aws.String(c.lockPath() + stateIDSuffix)},
|
|
|
|
},
|
2017-05-26 01:12:20 +02:00
|
|
|
TableName: aws.String(c.ddbTable),
|
2017-05-19 17:51:46 +02:00
|
|
|
}
|
|
|
|
if _, err := c.dynClient.DeleteItem(params); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-08-11 17:43:01 +02:00
|
|
|
func (c *RemoteClient) getLockInfo() (*statemgr.LockInfo, error) {
|
2017-02-15 18:51:57 +01:00
|
|
|
getParams := &dynamodb.GetItemInput{
|
|
|
|
Key: map[string]*dynamodb.AttributeValue{
|
2017-03-30 19:36:54 +02:00
|
|
|
"LockID": {S: aws.String(c.lockPath())},
|
2017-02-15 18:51:57 +01:00
|
|
|
},
|
|
|
|
ProjectionExpression: aws.String("LockID, Info"),
|
2017-05-26 01:12:20 +02:00
|
|
|
TableName: aws.String(c.ddbTable),
|
2017-10-27 22:43:30 +02:00
|
|
|
ConsistentRead: aws.Bool(true),
|
2017-02-15 18:51:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
resp, err := c.dynClient.GetItem(getParams)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
var infoData string
|
|
|
|
if v, ok := resp.Item["Info"]; ok && v.S != nil {
|
|
|
|
infoData = *v.S
|
|
|
|
}
|
|
|
|
|
2020-08-11 17:43:01 +02:00
|
|
|
lockInfo := &statemgr.LockInfo{}
|
2017-02-15 18:51:57 +01:00
|
|
|
err = json.Unmarshal([]byte(infoData), lockInfo)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return lockInfo, nil
|
|
|
|
}
|
|
|
|
|
2017-03-22 20:52:55 +01:00
|
|
|
func (c *RemoteClient) Unlock(id string) error {
|
2017-05-26 01:12:20 +02:00
|
|
|
if c.ddbTable == "" {
|
2017-01-12 22:55:42 +01:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-08-11 17:43:01 +02:00
|
|
|
lockErr := &statemgr.LockError{}
|
2017-02-15 20:01:18 +01:00
|
|
|
|
2017-02-15 18:51:57 +01:00
|
|
|
// TODO: store the path and lock ID in separate fields, and have proper
|
|
|
|
// projection expression only delete the lock if both match, rather than
|
|
|
|
// checking the ID from the info field first.
|
|
|
|
lockInfo, err := c.getLockInfo()
|
|
|
|
if err != nil {
|
2017-02-15 20:01:18 +01:00
|
|
|
lockErr.Err = fmt.Errorf("failed to retrieve lock info: %s", err)
|
|
|
|
return lockErr
|
2017-02-15 18:51:57 +01:00
|
|
|
}
|
2017-02-15 20:01:18 +01:00
|
|
|
lockErr.Info = lockInfo
|
2017-02-15 18:51:57 +01:00
|
|
|
|
|
|
|
if lockInfo.ID != id {
|
2017-02-15 20:01:18 +01:00
|
|
|
lockErr.Err = fmt.Errorf("lock id %q does not match existing lock", id)
|
|
|
|
return lockErr
|
2017-02-15 18:51:57 +01:00
|
|
|
}
|
|
|
|
|
2017-01-12 22:55:42 +01:00
|
|
|
params := &dynamodb.DeleteItemInput{
|
|
|
|
Key: map[string]*dynamodb.AttributeValue{
|
2017-03-30 19:36:54 +02:00
|
|
|
"LockID": {S: aws.String(c.lockPath())},
|
2017-01-12 22:55:42 +01:00
|
|
|
},
|
2017-05-26 01:12:20 +02:00
|
|
|
TableName: aws.String(c.ddbTable),
|
2017-01-12 22:55:42 +01:00
|
|
|
}
|
2017-02-15 18:51:57 +01:00
|
|
|
_, err = c.dynClient.DeleteItem(params)
|
2017-01-12 22:55:42 +01:00
|
|
|
|
|
|
|
if err != nil {
|
2017-02-15 20:01:18 +01:00
|
|
|
lockErr.Err = err
|
|
|
|
return lockErr
|
2017-01-12 22:55:42 +01:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
2017-03-30 19:36:54 +02:00
|
|
|
|
|
|
|
func (c *RemoteClient) lockPath() string {
|
|
|
|
return fmt.Sprintf("%s/%s", c.bucketName, c.path)
|
|
|
|
}
|
2017-05-24 19:51:33 +02:00
|
|
|
|
2019-07-05 17:54:07 +02:00
|
|
|
func (c *RemoteClient) getSSECustomerKeyMD5() string {
|
|
|
|
b := md5.Sum(c.customerEncryptionKey)
|
|
|
|
return base64.StdEncoding.EncodeToString(b[:])
|
|
|
|
}
|
|
|
|
|
2017-05-24 19:51:33 +02:00
|
|
|
// errBadChecksumFmt is the error returned by Get when the state's MD5 still
// does not match the digest recorded in DynamoDB after the retry deadline;
// the %x verb is filled with the actual checksum of the fetched state.
const errBadChecksumFmt = `state data in S3 does not have the expected content.

This may be caused by unusually long delays in S3 processing a previous state
update. Please wait for a minute or two and try again. If this problem
persists, and neither S3 nor DynamoDB are experiencing an outage, you may need
to manually verify the remote state and update the Digest value stored in the
DynamoDB table to the following value: %x
`
|
2019-01-09 19:01:37 +01:00
|
|
|
|
|
|
|
// errS3NoSuchBucket is the error returned by get when the configured S3
// bucket does not exist; the %s verb is filled with the underlying AWS error.
const errS3NoSuchBucket = `S3 bucket does not exist.

The referenced S3 bucket must have been previously created. If the S3 bucket
was created within the last minute, please wait for a minute or two and try
again.

Error: %s
`
|