package aws

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/url"
	"regexp"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/hashicorp/errwrap"
	"github.com/hashicorp/terraform/helper/hashcode"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"
)
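
// resourceAwsS3Bucket returns the schema and CRUD handlers for the
// aws_s3_bucket resource, including import support.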
func resourceAwsS3Bucket() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsS3BucketCreate,
		Read:   resourceAwsS3BucketRead,
		Update: resourceAwsS3BucketUpdate,
		Delete: resourceAwsS3BucketDelete,
		Importer: &schema.ResourceImporter{
			State: resourceAwsS3BucketImportState,
		},

		Schema: map[string]*schema.Schema{
			"bucket": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"bucket_domain_name": {
				Type:     schema.TypeString,
				Computed: true,
			},

			"arn": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			"acl": {
				Type:     schema.TypeString,
				Default:  "private",
				Optional: true,
			},

			"policy": {
				Type:             schema.TypeString,
				Optional:         true,
				ValidateFunc:     validateJsonString,
				DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs,
			},

			"cors_rule": {
				Type:     schema.TypeList,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"allowed_headers": {
							Type:     schema.TypeList,
							Optional: true,
							Elem:     &schema.Schema{Type: schema.TypeString},
						},
						"allowed_methods": {
							Type:     schema.TypeList,
							Required: true,
							Elem:     &schema.Schema{Type: schema.TypeString},
						},
						"allowed_origins": {
							Type:     schema.TypeList,
							Required: true,
							Elem:     &schema.Schema{Type: schema.TypeString},
						},
						"expose_headers": {
							Type:     schema.TypeList,
							Optional: true,
							Elem:     &schema.Schema{Type: schema.TypeString},
						},
						"max_age_seconds": {
							Type:     schema.TypeInt,
							Optional: true,
						},
					},
				},
			},

			"website": {
				Type:     schema.TypeList,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"index_document": {
							Type:     schema.TypeString,
							Optional: true,
						},

						"error_document": {
							Type:     schema.TypeString,
							Optional: true,
						},

						"redirect_all_requests_to": {
							Type: schema.TypeString,
							ConflictsWith: []string{
								"website.0.index_document",
								"website.0.error_document",
								"website.0.routing_rules",
							},
							Optional: true,
						},

						"routing_rules": {
							Type:         schema.TypeString,
							Optional:     true,
							ValidateFunc: validateJsonString,
							StateFunc: func(v interface{}) string {
								json, _ := normalizeJsonString(v)
								return json
							},
						},
					},
				},
			},

			"hosted_zone_id": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			"region": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
			"website_endpoint": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
			"website_domain": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			"versioning": {
				Type:     schema.TypeList,
				Optional: true,
				Computed: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"enabled": {
							Type:     schema.TypeBool,
							Optional: true,
							Default:  false,
						},
						"mfa_delete": {
							Type:     schema.TypeBool,
							Optional: true,
							Default:  false,
						},
					},
				},
			},

			"logging": {
				Type:     schema.TypeSet,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"target_bucket": {
							Type:     schema.TypeString,
							Required: true,
						},
						"target_prefix": {
							Type:     schema.TypeString,
							Optional: true,
						},
					},
				},
				Set: func(v interface{}) int {
					var buf bytes.Buffer
					m := v.(map[string]interface{})
					buf.WriteString(fmt.Sprintf("%s-", m["target_bucket"]))
					buf.WriteString(fmt.Sprintf("%s-", m["target_prefix"]))
					return hashcode.String(buf.String())
				},
			},

			"lifecycle_rule": {
				Type:     schema.TypeList,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"id": {
							Type:         schema.TypeString,
							Optional:     true,
							Computed:     true,
							ValidateFunc: validateS3BucketLifecycleRuleId,
						},
						"prefix": {
							Type:     schema.TypeString,
							Required: true,
						},
						"enabled": {
							Type:     schema.TypeBool,
							Required: true,
						},
						"abort_incomplete_multipart_upload_days": {
							Type:     schema.TypeInt,
							Optional: true,
						},
						"expiration": {
							Type:     schema.TypeSet,
							Optional: true,
							Set:      expirationHash,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"date": {
										Type:         schema.TypeString,
										Optional:     true,
										ValidateFunc: validateS3BucketLifecycleTimestamp,
									},
									"days": {
										Type:     schema.TypeInt,
										Optional: true,
									},
									"expired_object_delete_marker": {
										Type:     schema.TypeBool,
										Optional: true,
									},
								},
							},
						},
						"noncurrent_version_expiration": {
							Type:     schema.TypeSet,
							Optional: true,
							Set:      expirationHash,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"days": {
										Type:     schema.TypeInt,
										Optional: true,
									},
								},
							},
						},
						"transition": {
							Type:     schema.TypeSet,
							Optional: true,
							Set:      transitionHash,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"date": {
										Type:         schema.TypeString,
										Optional:     true,
										ValidateFunc: validateS3BucketLifecycleTimestamp,
									},
									"days": {
										Type:     schema.TypeInt,
										Optional: true,
									},
									"storage_class": {
										Type:         schema.TypeString,
										Required:     true,
										ValidateFunc: validateS3BucketLifecycleStorageClass,
									},
								},
							},
						},
						"noncurrent_version_transition": {
							Type:     schema.TypeSet,
							Optional: true,
							Set:      transitionHash,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"days": {
										Type:     schema.TypeInt,
										Optional: true,
									},
									"storage_class": {
										Type:         schema.TypeString,
										Required:     true,
										ValidateFunc: validateS3BucketLifecycleStorageClass,
									},
								},
							},
						},
					},
				},
			},

			"force_destroy": {
				Type:     schema.TypeBool,
				Optional: true,
				Default:  false,
			},

			"acceleration_status": {
				Type:         schema.TypeString,
				Optional:     true,
				Computed:     true,
				ValidateFunc: validateS3BucketAccelerationStatus,
			},

			"request_payer": {
				Type:         schema.TypeString,
				Optional:     true,
				Computed:     true,
				ValidateFunc: validateS3BucketRequestPayerType,
			},

			"replication_configuration": {
				Type:     schema.TypeList,
				Optional: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"role": {
							Type:     schema.TypeString,
							Required: true,
						},
						"rules": {
							Type:     schema.TypeSet,
							Required: true,
							Set:      rulesHash,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"id": {
										Type:         schema.TypeString,
										Optional:     true,
										ValidateFunc: validateS3BucketReplicationRuleId,
									},
									"destination": {
										Type:     schema.TypeSet,
										MaxItems: 1,
										MinItems: 1,
										Required: true,
										Set:      destinationHash,
										Elem: &schema.Resource{
											Schema: map[string]*schema.Schema{
												"bucket": {
													Type:         schema.TypeString,
													Required:     true,
													ValidateFunc: validateArn,
												},
												"storage_class": {
													Type:         schema.TypeString,
													Optional:     true,
													ValidateFunc: validateS3BucketReplicationDestinationStorageClass,
												},
											},
										},
									},
									"prefix": {
										Type:         schema.TypeString,
										Required:     true,
										ValidateFunc: validateS3BucketReplicationRulePrefix,
									},
									"status": {
										Type:         schema.TypeString,
										Required:     true,
										ValidateFunc: validateS3BucketReplicationRuleStatus,
									},
								},
							},
						},
					},
				},
			},

			"tags": tagsSchema(),
		},
	}
}
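
// resourceAwsS3BucketCreate creates the bucket, retrying on transient
// "OperationAborted" errors, then hands off to the update function to
// apply the remaining arguments.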
func resourceAwsS3BucketCreate(d *schema.ResourceData, meta interface{}) error {
	s3conn := meta.(*AWSClient).s3conn

	// Get the bucket and acl
	bucket := d.Get("bucket").(string)
	acl := d.Get("acl").(string)

	log.Printf("[DEBUG] S3 bucket create: %s, ACL: %s", bucket, acl)

	req := &s3.CreateBucketInput{
		Bucket: aws.String(bucket),
		ACL:    aws.String(acl),
	}

	var awsRegion string
	if region, ok := d.GetOk("region"); ok {
		awsRegion = region.(string)
	} else {
		awsRegion = meta.(*AWSClient).region
	}
	log.Printf("[DEBUG] S3 bucket create: %s, using region: %s", bucket, awsRegion)

	// Special case us-east-1 region and do not set the LocationConstraint.
	// See "Request Elements" in http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUT.html
	if awsRegion != "us-east-1" {
		req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
			LocationConstraint: aws.String(awsRegion),
		}
	}

	if err := validateS3BucketName(bucket, awsRegion); err != nil {
		return fmt.Errorf("Error validating S3 bucket name: %s", err)
	}

	err := resource.Retry(5*time.Minute, func() *resource.RetryError {
		log.Printf("[DEBUG] Trying to create new S3 bucket: %q", bucket)
		_, err := s3conn.CreateBucket(req)
		if awsErr, ok := err.(awserr.Error); ok {
			if awsErr.Code() == "OperationAborted" {
				log.Printf("[WARN] Got an error while trying to create S3 bucket %s: %s", bucket, err)
				return resource.RetryableError(
					fmt.Errorf("[WARN] Error creating S3 bucket %s, retrying: %s",
						bucket, err))
			}
		}
		if err != nil {
			return resource.NonRetryableError(err)
		}

		return nil
	})

	if err != nil {
		return fmt.Errorf("Error creating S3 bucket: %s", err)
	}

	// Assign the bucket name as the resource ID
	d.SetId(bucket)

	return resourceAwsS3BucketUpdate(d, meta)
}
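
// resourceAwsS3BucketUpdate pushes each changed argument to S3 via its
// dedicated helper and finishes with a fresh read of the bucket state.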
func resourceAwsS3BucketUpdate(d *schema.ResourceData, meta interface{}) error {
	s3conn := meta.(*AWSClient).s3conn
	if err := setTagsS3(s3conn, d); err != nil {
		return fmt.Errorf("%s %q", err, d.Get("bucket").(string))
	}

	if d.HasChange("policy") {
		if err := resourceAwsS3BucketPolicyUpdate(s3conn, d); err != nil {
			return err
		}
	}

	if d.HasChange("cors_rule") {
		if err := resourceAwsS3BucketCorsUpdate(s3conn, d); err != nil {
			return err
		}
	}

	if d.HasChange("website") {
		if err := resourceAwsS3BucketWebsiteUpdate(s3conn, d); err != nil {
			return err
		}
	}

	if d.HasChange("versioning") {
		if err := resourceAwsS3BucketVersioningUpdate(s3conn, d); err != nil {
			return err
		}
	}
	if d.HasChange("acl") {
		if err := resourceAwsS3BucketAclUpdate(s3conn, d); err != nil {
			return err
		}
	}

	if d.HasChange("logging") {
		if err := resourceAwsS3BucketLoggingUpdate(s3conn, d); err != nil {
			return err
		}
	}

	if d.HasChange("lifecycle_rule") {
		if err := resourceAwsS3BucketLifecycleUpdate(s3conn, d); err != nil {
			return err
		}
	}

	if d.HasChange("acceleration_status") {
		if err := resourceAwsS3BucketAccelerationUpdate(s3conn, d); err != nil {
			return err
		}
	}

	if d.HasChange("request_payer") {
		if err := resourceAwsS3BucketRequestPayerUpdate(s3conn, d); err != nil {
			return err
		}
	}

	if d.HasChange("replication_configuration") {
		if err := resourceAwsS3BucketReplicationConfigurationUpdate(s3conn, d); err != nil {
			return err
		}
	}

	return resourceAwsS3BucketRead(d, meta)
}
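
// resourceAwsS3BucketRead refreshes the Terraform state from S3, reading
// each sub-configuration (policy, CORS, website, versioning, acceleration,
// request payer, logging, lifecycle, replication, and location) in turn.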
func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error {
	s3conn := meta.(*AWSClient).s3conn

	var err error
	_, err = s3conn.HeadBucket(&s3.HeadBucketInput{
		Bucket: aws.String(d.Id()),
	})
	if err != nil {
		if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() == 404 {
			log.Printf("[WARN] S3 Bucket (%s) not found, error code (404)", d.Id())
			d.SetId("")
			return nil
		} else {
			// some of the AWS SDK's errors can be empty strings, so let's add
			// some additional context.
			return fmt.Errorf("error reading S3 bucket \"%s\": %s", d.Id(), err)
		}
	}

	// In the import case, we won't have this
	if _, ok := d.GetOk("bucket"); !ok {
		d.Set("bucket", d.Id())
	}

	d.Set("bucket_domain_name", bucketDomainName(d.Get("bucket").(string)))

	// Read the policy
	if _, ok := d.GetOk("policy"); ok {
		pol, err := s3conn.GetBucketPolicy(&s3.GetBucketPolicyInput{
			Bucket: aws.String(d.Id()),
		})
		log.Printf("[DEBUG] S3 bucket: %s, read policy: %v", d.Id(), pol)
		if err != nil {
			if err := d.Set("policy", ""); err != nil {
				return err
			}
		} else {
			if v := pol.Policy; v == nil {
				if err := d.Set("policy", ""); err != nil {
					return err
				}
			} else {
				policy, err := normalizeJsonString(*v)
				if err != nil {
					return errwrap.Wrapf("policy contains an invalid JSON: {{err}}", err)
				}
				d.Set("policy", policy)
			}
		}
	}

	// Read the CORS
	cors, err := s3conn.GetBucketCors(&s3.GetBucketCorsInput{
		Bucket: aws.String(d.Id()),
	})
	if err != nil {
		// An S3 Bucket might not have CORS configuration set.
		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() != "NoSuchCORSConfiguration" {
			return err
		}
		log.Printf("[WARN] S3 bucket: %s, no CORS configuration could be found.", d.Id())
	}
	log.Printf("[DEBUG] S3 bucket: %s, read CORS: %v", d.Id(), cors)
	if cors.CORSRules != nil {
		rules := make([]map[string]interface{}, 0, len(cors.CORSRules))
		for _, ruleObject := range cors.CORSRules {
			rule := make(map[string]interface{})
			rule["allowed_headers"] = flattenStringList(ruleObject.AllowedHeaders)
			rule["allowed_methods"] = flattenStringList(ruleObject.AllowedMethods)
			rule["allowed_origins"] = flattenStringList(ruleObject.AllowedOrigins)
			// Both the "ExposeHeaders" and "MaxAgeSeconds" might not be set.
			if ruleObject.ExposeHeaders != nil {
				rule["expose_headers"] = flattenStringList(ruleObject.ExposeHeaders)
			}
			if ruleObject.MaxAgeSeconds != nil {
				rule["max_age_seconds"] = int(*ruleObject.MaxAgeSeconds)
			}
			rules = append(rules, rule)
		}
		if err := d.Set("cors_rule", rules); err != nil {
			return err
		}
	}

	// Read the website configuration
	ws, err := s3conn.GetBucketWebsite(&s3.GetBucketWebsiteInput{
		Bucket: aws.String(d.Id()),
	})
	var websites []map[string]interface{}
	if err == nil {
		w := make(map[string]interface{})

		if v := ws.IndexDocument; v != nil {
			w["index_document"] = *v.Suffix
		}

		if v := ws.ErrorDocument; v != nil {
			w["error_document"] = *v.Key
		}

		if v := ws.RedirectAllRequestsTo; v != nil {
			if v.Protocol == nil {
				w["redirect_all_requests_to"] = *v.HostName
			} else {
				var host string
				var path string
				parsedHostName, err := url.Parse(*v.HostName)
				if err == nil {
					host = parsedHostName.Host
					path = parsedHostName.Path
				} else {
					host = *v.HostName
					path = ""
				}

				w["redirect_all_requests_to"] = (&url.URL{
					Host:   host,
					Path:   path,
					Scheme: *v.Protocol,
				}).String()
			}
		}

		if v := ws.RoutingRules; v != nil {
			rr, err := normalizeRoutingRules(v)
			if err != nil {
				return fmt.Errorf("Error while marshaling routing rules: %s", err)
			}
			w["routing_rules"] = rr
		}

		websites = append(websites, w)
	}
	if err := d.Set("website", websites); err != nil {
		return err
	}

	// Read the versioning configuration
	versioning, err := s3conn.GetBucketVersioning(&s3.GetBucketVersioningInput{
		Bucket: aws.String(d.Id()),
	})
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] S3 Bucket: %s, versioning: %v", d.Id(), versioning)
	if versioning != nil {
		vcl := make([]map[string]interface{}, 0, 1)
		vc := make(map[string]interface{})
		if versioning.Status != nil && *versioning.Status == s3.BucketVersioningStatusEnabled {
			vc["enabled"] = true
		} else {
			vc["enabled"] = false
		}

		if versioning.MFADelete != nil && *versioning.MFADelete == s3.MFADeleteEnabled {
			vc["mfa_delete"] = true
		} else {
			vc["mfa_delete"] = false
		}
		vcl = append(vcl, vc)
		if err := d.Set("versioning", vcl); err != nil {
			return err
		}
	}

	// Read the acceleration status
	accelerate, err := s3conn.GetBucketAccelerateConfiguration(&s3.GetBucketAccelerateConfigurationInput{
		Bucket: aws.String(d.Id()),
	})
	if err != nil {
		// Amazon S3 Transfer Acceleration might not be supported in the
		// given region; for example, China (Beijing) and the Government
		// Cloud do not support this feature at the moment.
		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() != "UnsupportedArgument" {
			return err
		}

		var awsRegion string
		if region, ok := d.GetOk("region"); ok {
			awsRegion = region.(string)
		} else {
			awsRegion = meta.(*AWSClient).region
		}

		log.Printf("[WARN] S3 bucket: %s, S3 Transfer Acceleration is not supported in the region: %s", d.Id(), awsRegion)
	} else {
		log.Printf("[DEBUG] S3 bucket: %s, read Acceleration: %v", d.Id(), accelerate)
		d.Set("acceleration_status", accelerate.Status)
	}

	// Read the request payer configuration.
	payer, err := s3conn.GetBucketRequestPayment(&s3.GetBucketRequestPaymentInput{
		Bucket: aws.String(d.Id()),
	})
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] S3 Bucket: %s, read request payer: %v", d.Id(), payer)
	if payer.Payer != nil {
		if err := d.Set("request_payer", *payer.Payer); err != nil {
			return err
		}
	}

	// Read the logging configuration
	logging, err := s3conn.GetBucketLogging(&s3.GetBucketLoggingInput{
		Bucket: aws.String(d.Id()),
	})
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] S3 Bucket: %s, logging: %v", d.Id(), logging)
	if v := logging.LoggingEnabled; v != nil {
		lcl := make([]map[string]interface{}, 0, 1)
		lc := make(map[string]interface{})
		if *v.TargetBucket != "" {
			lc["target_bucket"] = *v.TargetBucket
		}
		if *v.TargetPrefix != "" {
			lc["target_prefix"] = *v.TargetPrefix
		}
		lcl = append(lcl, lc)
		if err := d.Set("logging", lcl); err != nil {
			return err
		}
	}

	// Read the lifecycle configuration
	lifecycle, err := s3conn.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{
		Bucket: aws.String(d.Id()),
	})
	if err != nil {
		if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() != 404 {
			return err
		}
	}
	log.Printf("[DEBUG] S3 Bucket: %s, lifecycle: %v", d.Id(), lifecycle)
	if len(lifecycle.Rules) > 0 {
		rules := make([]map[string]interface{}, 0, len(lifecycle.Rules))

		for _, lifecycleRule := range lifecycle.Rules {
			rule := make(map[string]interface{})

			// ID
			if lifecycleRule.ID != nil && *lifecycleRule.ID != "" {
				rule["id"] = *lifecycleRule.ID
			}
			// Prefix
			if lifecycleRule.Prefix != nil && *lifecycleRule.Prefix != "" {
				rule["prefix"] = *lifecycleRule.Prefix
			}
			// Enabled
			if lifecycleRule.Status != nil {
				if *lifecycleRule.Status == s3.ExpirationStatusEnabled {
					rule["enabled"] = true
				} else {
					rule["enabled"] = false
				}
			}

			// AbortIncompleteMultipartUploadDays
			if lifecycleRule.AbortIncompleteMultipartUpload != nil {
				if lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation != nil {
					rule["abort_incomplete_multipart_upload_days"] = int(*lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation)
				}
			}

			// expiration
			if lifecycleRule.Expiration != nil {
				e := make(map[string]interface{})
				if lifecycleRule.Expiration.Date != nil {
					e["date"] = (*lifecycleRule.Expiration.Date).Format("2006-01-02")
				}
				if lifecycleRule.Expiration.Days != nil {
					e["days"] = int(*lifecycleRule.Expiration.Days)
				}
				if lifecycleRule.Expiration.ExpiredObjectDeleteMarker != nil {
					e["expired_object_delete_marker"] = *lifecycleRule.Expiration.ExpiredObjectDeleteMarker
				}
				rule["expiration"] = schema.NewSet(expirationHash, []interface{}{e})
			}
			// noncurrent_version_expiration
			if lifecycleRule.NoncurrentVersionExpiration != nil {
				e := make(map[string]interface{})
				if lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays != nil {
					e["days"] = int(*lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays)
				}
				rule["noncurrent_version_expiration"] = schema.NewSet(expirationHash, []interface{}{e})
			}
			// transition
			if len(lifecycleRule.Transitions) > 0 {
				transitions := make([]interface{}, 0, len(lifecycleRule.Transitions))
				for _, v := range lifecycleRule.Transitions {
					t := make(map[string]interface{})
					if v.Date != nil {
						t["date"] = (*v.Date).Format("2006-01-02")
					}
					if v.Days != nil {
						t["days"] = int(*v.Days)
					}
					if v.StorageClass != nil {
						t["storage_class"] = *v.StorageClass
					}
					transitions = append(transitions, t)
				}
				rule["transition"] = schema.NewSet(transitionHash, transitions)
			}
			// noncurrent_version_transition
			if len(lifecycleRule.NoncurrentVersionTransitions) > 0 {
				transitions := make([]interface{}, 0, len(lifecycleRule.NoncurrentVersionTransitions))
				for _, v := range lifecycleRule.NoncurrentVersionTransitions {
					t := make(map[string]interface{})
					if v.NoncurrentDays != nil {
						t["days"] = int(*v.NoncurrentDays)
					}
					if v.StorageClass != nil {
						t["storage_class"] = *v.StorageClass
					}
					transitions = append(transitions, t)
				}
				rule["noncurrent_version_transition"] = schema.NewSet(transitionHash, transitions)
			}

			rules = append(rules, rule)
		}

		if err := d.Set("lifecycle_rule", rules); err != nil {
			return err
		}
	}

	// Read the bucket replication configuration
	replication, err := s3conn.GetBucketReplication(&s3.GetBucketReplicationInput{
		Bucket: aws.String(d.Id()),
	})
	if err != nil {
		if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() != 404 {
			return err
		}
	}

	log.Printf("[DEBUG] S3 Bucket: %s, read replication configuration: %v", d.Id(), replication)
	if r := replication.ReplicationConfiguration; r != nil {
		if err := d.Set("replication_configuration", flattenAwsS3BucketReplicationConfiguration(replication.ReplicationConfiguration)); err != nil {
			log.Printf("[DEBUG] Error setting replication configuration: %s", err)
			return err
		}
	}

	// Add the region as an attribute
	location, err := s3conn.GetBucketLocation(
		&s3.GetBucketLocationInput{
			Bucket: aws.String(d.Id()),
		},
	)
	if err != nil {
		return err
	}
	var region string
	if location.LocationConstraint != nil {
		region = *location.LocationConstraint
	}
	region = normalizeRegion(region)
	if err := d.Set("region", region); err != nil {
		return err
	}

	// Add the hosted zone ID for this bucket's region as an attribute
	hostedZoneID := HostedZoneIDForRegion(region)
	if err := d.Set("hosted_zone_id", hostedZoneID); err != nil {
		return err
	}

	// Add website_endpoint as an attribute
	websiteEndpoint, err := websiteEndpoint(s3conn, d)
	if err != nil {
		return err
	}
	if websiteEndpoint != nil {
		if err := d.Set("website_endpoint", websiteEndpoint.Endpoint); err != nil {
			return err
		}
		if err := d.Set("website_domain", websiteEndpoint.Domain); err != nil {
			return err
		}
	}

	tagSet, err := getTagSetS3(s3conn, d.Id())
	if err != nil {
		return err
	}

	if err := d.Set("tags", tagsToMapS3(tagSet)); err != nil {
		return err
	}

	d.Set("arn", fmt.Sprintf("arn:%s:s3:::%s", meta.(*AWSClient).partition, d.Id()))

	return nil
}
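
// resourceAwsS3BucketDelete deletes the bucket. When force_destroy is set
// and the bucket is not empty, it deletes all object versions and delete
// markers first, then retries the bucket deletion by recursing.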
func resourceAwsS3BucketDelete(d *schema.ResourceData, meta interface{}) error {
	s3conn := meta.(*AWSClient).s3conn

	log.Printf("[DEBUG] S3 Delete Bucket: %s", d.Id())
	_, err := s3conn.DeleteBucket(&s3.DeleteBucketInput{
		Bucket: aws.String(d.Id()),
	})
	if err != nil {
		ec2err, ok := err.(awserr.Error)
		if ok && ec2err.Code() == "BucketNotEmpty" {
			if d.Get("force_destroy").(bool) {
				// The bucket may still contain objects; delete them so the
				// bucket deletion can be retried.
				log.Printf("[DEBUG] S3 Bucket attempting to forceDestroy %+v", err)

				bucket := d.Get("bucket").(string)
				resp, err := s3conn.ListObjectVersions(
					&s3.ListObjectVersionsInput{
						Bucket: aws.String(bucket),
					},
				)

				if err != nil {
					return fmt.Errorf("Error S3 Bucket list Object Versions err: %s", err)
				}

				objectsToDelete := make([]*s3.ObjectIdentifier, 0)

				if len(resp.DeleteMarkers) != 0 {
					for _, v := range resp.DeleteMarkers {
						objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{
							Key:       v.Key,
							VersionId: v.VersionId,
						})
					}
				}

				if len(resp.Versions) != 0 {
					for _, v := range resp.Versions {
						objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{
							Key:       v.Key,
							VersionId: v.VersionId,
						})
					}
				}

				params := &s3.DeleteObjectsInput{
					Bucket: aws.String(bucket),
					Delete: &s3.Delete{
						Objects: objectsToDelete,
					},
				}

				_, err = s3conn.DeleteObjects(params)

				if err != nil {
					return fmt.Errorf("Error S3 Bucket force_destroy error deleting: %s", err)
				}

				// This recurses until all objects are deleted or an error is returned.
				return resourceAwsS3BucketDelete(d, meta)
			}
		}
		return fmt.Errorf("Error deleting S3 Bucket: %s", err)
	}
	return nil
}
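
// resourceAwsS3BucketPolicyUpdate puts the bucket policy, retrying while
// AWS reports it as malformed, or deletes the policy when it is empty.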
func resourceAwsS3BucketPolicyUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	bucket := d.Get("bucket").(string)
	policy := d.Get("policy").(string)

	if policy != "" {
		log.Printf("[DEBUG] S3 bucket: %s, put policy: %s", bucket, policy)

		params := &s3.PutBucketPolicyInput{
			Bucket: aws.String(bucket),
			Policy: aws.String(policy),
		}

		err := resource.Retry(1*time.Minute, func() *resource.RetryError {
			if _, err := s3conn.PutBucketPolicy(params); err != nil {
				if awserr, ok := err.(awserr.Error); ok {
					if awserr.Code() == "MalformedPolicy" {
						return resource.RetryableError(awserr)
					}
				}
				return resource.NonRetryableError(err)
			}
			return nil
		})

		if err != nil {
			return fmt.Errorf("Error putting S3 policy: %s", err)
		}
	} else {
		log.Printf("[DEBUG] S3 bucket: %s, delete policy: %s", bucket, policy)
		_, err := s3conn.DeleteBucketPolicy(&s3.DeleteBucketPolicyInput{
			Bucket: aws.String(bucket),
		})

		if err != nil {
			return fmt.Errorf("Error deleting S3 policy: %s", err)
		}
	}

	return nil
}
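
// resourceAwsS3BucketCorsUpdate replaces the bucket's CORS configuration
// with the rules from cors_rule, or deletes it when the list is empty.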
func resourceAwsS3BucketCorsUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	bucket := d.Get("bucket").(string)
	rawCors := d.Get("cors_rule").([]interface{})

	if len(rawCors) == 0 {
		// Delete CORS
		log.Printf("[DEBUG] S3 bucket: %s, delete CORS", bucket)
		_, err := s3conn.DeleteBucketCors(&s3.DeleteBucketCorsInput{
			Bucket: aws.String(bucket),
		})
		if err != nil {
			return fmt.Errorf("Error deleting S3 CORS: %s", err)
		}
	} else {
		// Put CORS
		rules := make([]*s3.CORSRule, 0, len(rawCors))
		for _, cors := range rawCors {
			corsMap := cors.(map[string]interface{})
			r := &s3.CORSRule{}
			for k, v := range corsMap {
				log.Printf("[DEBUG] S3 bucket: %s, put CORS: %#v, %#v", bucket, k, v)
				if k == "max_age_seconds" {
					r.MaxAgeSeconds = aws.Int64(int64(v.(int)))
				} else {
					vMap := make([]*string, len(v.([]interface{})))
					for i, vv := range v.([]interface{}) {
						str := vv.(string)
						vMap[i] = aws.String(str)
					}
					switch k {
					case "allowed_headers":
						r.AllowedHeaders = vMap
					case "allowed_methods":
						r.AllowedMethods = vMap
					case "allowed_origins":
						r.AllowedOrigins = vMap
					case "expose_headers":
						r.ExposeHeaders = vMap
					}
				}
			}
			rules = append(rules, r)
		}
		corsInput := &s3.PutBucketCorsInput{
			Bucket: aws.String(bucket),
			CORSConfiguration: &s3.CORSConfiguration{
				CORSRules: rules,
			},
		}
		log.Printf("[DEBUG] S3 bucket: %s, put CORS: %#v", bucket, corsInput)
		_, err := s3conn.PutBucketCors(corsInput)
		if err != nil {
			return fmt.Errorf("Error putting S3 CORS: %s", err)
		}
	}

	return nil
}
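
// resourceAwsS3BucketWebsiteUpdate dispatches to the website put or delete
// helper depending on whether a website block is configured.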
func resourceAwsS3BucketWebsiteUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	ws := d.Get("website").([]interface{})

	if len(ws) == 1 {
		var w map[string]interface{}
		if ws[0] != nil {
			w = ws[0].(map[string]interface{})
		} else {
			w = make(map[string]interface{})
		}
		return resourceAwsS3BucketWebsitePut(s3conn, d, w)
	} else if len(ws) == 0 {
		return resourceAwsS3BucketWebsiteDelete(s3conn, d)
	} else {
		return fmt.Errorf("Cannot specify more than one website.")
	}
}
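
// resourceAwsS3BucketWebsitePut builds a WebsiteConfiguration from the
// website block and applies it with PutBucketWebsite.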
func resourceAwsS3BucketWebsitePut(s3conn *s3.S3, d *schema.ResourceData, website map[string]interface{}) error {
	bucket := d.Get("bucket").(string)

	var indexDocument, errorDocument, redirectAllRequestsTo, routingRules string
	if v, ok := website["index_document"]; ok {
		indexDocument = v.(string)
	}
	if v, ok := website["error_document"]; ok {
		errorDocument = v.(string)
	}
	if v, ok := website["redirect_all_requests_to"]; ok {
		redirectAllRequestsTo = v.(string)
	}
	if v, ok := website["routing_rules"]; ok {
		routingRules = v.(string)
	}

	if indexDocument == "" && redirectAllRequestsTo == "" {
		return fmt.Errorf("Must specify either index_document or redirect_all_requests_to.")
	}

	websiteConfiguration := &s3.WebsiteConfiguration{}

	if indexDocument != "" {
		websiteConfiguration.IndexDocument = &s3.IndexDocument{Suffix: aws.String(indexDocument)}
	}

	if errorDocument != "" {
		websiteConfiguration.ErrorDocument = &s3.ErrorDocument{Key: aws.String(errorDocument)}
	}

	if redirectAllRequestsTo != "" {
		redirect, err := url.Parse(redirectAllRequestsTo)
		if err == nil && redirect.Scheme != "" {
			var redirectHostBuf bytes.Buffer
			redirectHostBuf.WriteString(redirect.Host)
			if redirect.Path != "" {
				redirectHostBuf.WriteString(redirect.Path)
			}
			websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{HostName: aws.String(redirectHostBuf.String()), Protocol: aws.String(redirect.Scheme)}
		} else {
			websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{HostName: aws.String(redirectAllRequestsTo)}
		}
	}

	if routingRules != "" {
		var unmarshaledRules []*s3.RoutingRule
		if err := json.Unmarshal([]byte(routingRules), &unmarshaledRules); err != nil {
			return err
		}
		websiteConfiguration.RoutingRules = unmarshaledRules
	}

	putInput := &s3.PutBucketWebsiteInput{
		Bucket:               aws.String(bucket),
		WebsiteConfiguration: websiteConfiguration,
	}

	log.Printf("[DEBUG] S3 put bucket website: %#v", putInput)

	_, err := s3conn.PutBucketWebsite(putInput)
	if err != nil {
		return fmt.Errorf("Error putting S3 website: %s", err)
	}

	return nil
}
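
// resourceAwsS3BucketWebsiteDelete removes the website configuration and
// clears the computed website attributes from state.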
func resourceAwsS3BucketWebsiteDelete(s3conn *s3.S3, d *schema.ResourceData) error {
	bucket := d.Get("bucket").(string)
	deleteInput := &s3.DeleteBucketWebsiteInput{Bucket: aws.String(bucket)}

	log.Printf("[DEBUG] S3 delete bucket website: %#v", deleteInput)

	_, err := s3conn.DeleteBucketWebsite(deleteInput)
	if err != nil {
		return fmt.Errorf("Error deleting S3 website: %s", err)
	}

	d.Set("website_endpoint", "")
	d.Set("website_domain", "")

	return nil
}
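
// websiteEndpoint looks up the bucket's region and derives its website
// endpoint; it returns nil when no website block is configured.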
func websiteEndpoint(s3conn *s3.S3, d *schema.ResourceData) (*S3Website, error) {
	// If the bucket doesn't have a website configuration, return an empty
	// endpoint
	if _, ok := d.GetOk("website"); !ok {
		return nil, nil
	}

	bucket := d.Get("bucket").(string)

	// Lookup the region for this bucket
	location, err := s3conn.GetBucketLocation(
		&s3.GetBucketLocationInput{
			Bucket: aws.String(bucket),
		},
	)
	if err != nil {
		return nil, err
	}
	var region string
	if location.LocationConstraint != nil {
		region = *location.LocationConstraint
	}

	return WebsiteEndpoint(bucket, region), nil
}
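
// bucketDomainName returns the canonical domain name for a bucket.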
func bucketDomainName(bucket string) string {
	return fmt.Sprintf("%s.s3.amazonaws.com", bucket)
}
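
// WebsiteEndpoint combines the bucket name with the region's website
// domain to form the full website endpoint.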
func WebsiteEndpoint(bucket string, region string) *S3Website {
	domain := WebsiteDomainUrl(region)
	return &S3Website{Endpoint: fmt.Sprintf("%s.%s", bucket, domain), Domain: domain}
}
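
// WebsiteDomainUrl returns the S3 website domain for a region, using the
// legacy dash-style syntax for older regions and dot-style for newer ones.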
func WebsiteDomainUrl(region string) string {
	region = normalizeRegion(region)

	// Newer regions use a different syntax for website endpoints.
	// http://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteEndpoints.html
	if isOldRegion(region) {
		return fmt.Sprintf("s3-website-%s.amazonaws.com", region)
	}
	return fmt.Sprintf("s3-website.%s.amazonaws.com", region)
}
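
// isOldRegion reports whether a region predates the dot-style website
// endpoint syntax.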
func isOldRegion(region string) bool {
	oldRegions := []string{
		"ap-northeast-1",
		"ap-southeast-1",
		"ap-southeast-2",
		"eu-west-1",
		"sa-east-1",
		"us-east-1",
		"us-gov-west-1",
		"us-west-1",
		"us-west-2",
	}
	for _, r := range oldRegions {
		if region == r {
			return true
		}
	}
	return false
}
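
// resourceAwsS3BucketAclUpdate applies the configured canned ACL to the
// bucket.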
func resourceAwsS3BucketAclUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	acl := d.Get("acl").(string)
	bucket := d.Get("bucket").(string)

	i := &s3.PutBucketAclInput{
		Bucket: aws.String(bucket),
		ACL:    aws.String(acl),
	}
	log.Printf("[DEBUG] S3 put bucket ACL: %#v", i)

	_, err := s3conn.PutBucketAcl(i)
	if err != nil {
		return fmt.Errorf("Error putting S3 ACL: %s", err)
	}

	return nil
}
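
// resourceAwsS3BucketVersioningUpdate maps the versioning block onto a
// VersioningConfiguration and applies it; an absent block suspends
// versioning.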
func resourceAwsS3BucketVersioningUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	v := d.Get("versioning").([]interface{})
	bucket := d.Get("bucket").(string)
	vc := &s3.VersioningConfiguration{}

	if len(v) > 0 {
		c := v[0].(map[string]interface{})

		if c["enabled"].(bool) {
			vc.Status = aws.String(s3.BucketVersioningStatusEnabled)
		} else {
			vc.Status = aws.String(s3.BucketVersioningStatusSuspended)
		}

		if c["mfa_delete"].(bool) {
			vc.MFADelete = aws.String(s3.MFADeleteEnabled)
		} else {
			vc.MFADelete = aws.String(s3.MFADeleteDisabled)
		}

	} else {
		vc.Status = aws.String(s3.BucketVersioningStatusSuspended)
	}

	i := &s3.PutBucketVersioningInput{
		Bucket:                  aws.String(bucket),
		VersioningConfiguration: vc,
	}
	log.Printf("[DEBUG] S3 put bucket versioning: %#v", i)

	_, err := s3conn.PutBucketVersioning(i)
	if err != nil {
		return fmt.Errorf("Error putting S3 versioning: %s", err)
	}

	return nil
}
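
// resourceAwsS3BucketLoggingUpdate applies the logging block as the
// bucket's access-logging status; an empty block disables logging.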
func resourceAwsS3BucketLoggingUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	logging := d.Get("logging").(*schema.Set).List()
	bucket := d.Get("bucket").(string)
	loggingStatus := &s3.BucketLoggingStatus{}

	if len(logging) > 0 {
		c := logging[0].(map[string]interface{})

		loggingEnabled := &s3.LoggingEnabled{}
		if val, ok := c["target_bucket"]; ok {
			loggingEnabled.TargetBucket = aws.String(val.(string))
		}
		if val, ok := c["target_prefix"]; ok {
			loggingEnabled.TargetPrefix = aws.String(val.(string))
		}

		loggingStatus.LoggingEnabled = loggingEnabled
	}

	i := &s3.PutBucketLoggingInput{
		Bucket:              aws.String(bucket),
		BucketLoggingStatus: loggingStatus,
	}
	log.Printf("[DEBUG] S3 put bucket logging: %#v", i)

	_, err := s3conn.PutBucketLogging(i)
	if err != nil {
		return fmt.Errorf("Error putting S3 logging: %s", err)
	}

	return nil
}
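
// resourceAwsS3BucketAccelerationUpdate applies the configured Transfer
// Acceleration status to the bucket.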
func resourceAwsS3BucketAccelerationUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	bucket := d.Get("bucket").(string)
	enableAcceleration := d.Get("acceleration_status").(string)

	i := &s3.PutBucketAccelerateConfigurationInput{
		Bucket: aws.String(bucket),
		AccelerateConfiguration: &s3.AccelerateConfiguration{
			Status: aws.String(enableAcceleration),
		},
	}
	log.Printf("[DEBUG] S3 put bucket acceleration: %#v", i)

	_, err := s3conn.PutBucketAccelerateConfiguration(i)
	if err != nil {
		return fmt.Errorf("Error putting S3 acceleration: %s", err)
	}

	return nil
}
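
// resourceAwsS3BucketRequestPayerUpdate applies the configured request
// payer setting to the bucket.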
func resourceAwsS3BucketRequestPayerUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	bucket := d.Get("bucket").(string)
	payer := d.Get("request_payer").(string)

	i := &s3.PutBucketRequestPaymentInput{
		Bucket: aws.String(bucket),
		RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
			Payer: aws.String(payer),
		},
	}
	log.Printf("[DEBUG] S3 put bucket request payer: %#v", i)

	_, err := s3conn.PutBucketRequestPayment(i)
	if err != nil {
		return fmt.Errorf("Error putting S3 request payer: %s", err)
	}

	return nil
}
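
// resourceAwsS3BucketReplicationConfigurationUpdate puts or deletes the
// bucket's replication configuration; replication requires versioning to
// be enabled, which is validated up front.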
func resourceAwsS3BucketReplicationConfigurationUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	bucket := d.Get("bucket").(string)
	replicationConfiguration := d.Get("replication_configuration").([]interface{})

	if len(replicationConfiguration) == 0 {
		i := &s3.DeleteBucketReplicationInput{
			Bucket: aws.String(bucket),
		}

		err := resource.Retry(1*time.Minute, func() *resource.RetryError {
			if _, err := s3conn.DeleteBucketReplication(i); err != nil {
				return resource.NonRetryableError(err)
			}
			return nil
		})
		if err != nil {
			return fmt.Errorf("Error removing S3 bucket replication: %s", err)
		}
		return nil
	}

	hasVersioning := false
	// Validate that bucket versioning is enabled
	if versioning, ok := d.GetOk("versioning"); ok {
		v := versioning.([]interface{})

		if v[0].(map[string]interface{})["enabled"].(bool) {
			hasVersioning = true
		}
	}

	if !hasVersioning {
		return fmt.Errorf("versioning must be enabled to allow S3 bucket replication")
	}

	c := replicationConfiguration[0].(map[string]interface{})

	rc := &s3.ReplicationConfiguration{}
	if val, ok := c["role"]; ok {
		rc.Role = aws.String(val.(string))
	}

	rcRules := c["rules"].(*schema.Set).List()
	rules := []*s3.ReplicationRule{}
	for _, v := range rcRules {
		rr := v.(map[string]interface{})
		rcRule := &s3.ReplicationRule{
			Prefix: aws.String(rr["prefix"].(string)),
			Status: aws.String(rr["status"].(string)),
		}

		if rrid, ok := rr["id"]; ok {
			rcRule.ID = aws.String(rrid.(string))
		}

		ruleDestination := &s3.Destination{}
		if destination, ok := rr["destination"]; ok {
			dest := destination.(*schema.Set).List()

			bd := dest[0].(map[string]interface{})
			ruleDestination.Bucket = aws.String(bd["bucket"].(string))

			if storageClass, ok := bd["storage_class"]; ok && storageClass != "" {
				ruleDestination.StorageClass = aws.String(storageClass.(string))
			}
		}
		rcRule.Destination = ruleDestination
		rules = append(rules, rcRule)
	}

	rc.Rules = rules
	i := &s3.PutBucketReplicationInput{
		Bucket:                   aws.String(bucket),
		ReplicationConfiguration: rc,
	}
	log.Printf("[DEBUG] S3 put bucket replication configuration: %#v", i)

	_, err := s3conn.PutBucketReplication(i)
	if err != nil {
		return fmt.Errorf("Error putting S3 replication configuration: %s", err)
	}

	return nil
}
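
// resourceAwsS3BucketLifecycleUpdate builds the lifecycle rules from
// lifecycle_rule and puts them on the bucket, or deletes the lifecycle
// configuration when no rules are configured.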

func resourceAwsS3BucketLifecycleUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	bucket := d.Get("bucket").(string)

	lifecycleRules := d.Get("lifecycle_rule").([]interface{})

	if len(lifecycleRules) == 0 {
		i := &s3.DeleteBucketLifecycleInput{
			Bucket: aws.String(bucket),
		}

		err := resource.Retry(1*time.Minute, func() *resource.RetryError {
			if _, err := s3conn.DeleteBucketLifecycle(i); err != nil {
				return resource.NonRetryableError(err)
			}
			return nil
		})
		if err != nil {
			return fmt.Errorf("Error removing S3 lifecycle: %s", err)
		}
		return nil
	}

	rules := make([]*s3.LifecycleRule, 0, len(lifecycleRules))

	for i, lifecycleRule := range lifecycleRules {
		r := lifecycleRule.(map[string]interface{})

		rule := &s3.LifecycleRule{
			Prefix: aws.String(r["prefix"].(string)),
		}

		// ID
		if val, ok := r["id"].(string); ok && val != "" {
			rule.ID = aws.String(val)
		} else {
			rule.ID = aws.String(resource.PrefixedUniqueId("tf-s3-lifecycle-"))
		}

		// Enabled
		if val, ok := r["enabled"].(bool); ok && val {
			rule.Status = aws.String(s3.ExpirationStatusEnabled)
		} else {
			rule.Status = aws.String(s3.ExpirationStatusDisabled)
		}

		// AbortIncompleteMultipartUpload
		if val, ok := r["abort_incomplete_multipart_upload_days"].(int); ok && val > 0 {
			rule.AbortIncompleteMultipartUpload = &s3.AbortIncompleteMultipartUpload{
				DaysAfterInitiation: aws.Int64(int64(val)),
			}
		}

		// Expiration
		expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.expiration", i)).(*schema.Set).List()
		if len(expiration) > 0 {
			e := expiration[0].(map[string]interface{})
			i := &s3.LifecycleExpiration{}

			if val, ok := e["date"].(string); ok && val != "" {
				t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val))
				if err != nil {
					return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error())
				}
				i.Date = aws.Time(t)
			} else if val, ok := e["days"].(int); ok && val > 0 {
				i.Days = aws.Int64(int64(val))
			} else if val, ok := e["expired_object_delete_marker"].(bool); ok {
				i.ExpiredObjectDeleteMarker = aws.Bool(val)
			}
			rule.Expiration = i
		}

		// NoncurrentVersionExpiration
		nc_expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_expiration", i)).(*schema.Set).List()
		if len(nc_expiration) > 0 {
			e := nc_expiration[0].(map[string]interface{})

			if val, ok := e["days"].(int); ok && val > 0 {
				rule.NoncurrentVersionExpiration = &s3.NoncurrentVersionExpiration{
					NoncurrentDays: aws.Int64(int64(val)),
				}
			}
		}

		// Transitions
		transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.transition", i)).(*schema.Set).List()
		if len(transitions) > 0 {
			rule.Transitions = make([]*s3.Transition, 0, len(transitions))
			for _, transition := range transitions {
				transition := transition.(map[string]interface{})
				i := &s3.Transition{}
				if val, ok := transition["date"].(string); ok && val != "" {
					t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val))
					if err != nil {
						return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Transition Date: %s", err.Error())
					}
					i.Date = aws.Time(t)
				} else if val, ok := transition["days"].(int); ok && val > 0 {
					i.Days = aws.Int64(int64(val))
				}
				if val, ok := transition["storage_class"].(string); ok && val != "" {
					i.StorageClass = aws.String(val)
				}

				rule.Transitions = append(rule.Transitions, i)
			}
		}

		// NoncurrentVersionTransitions
		nc_transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_transition", i)).(*schema.Set).List()
		if len(nc_transitions) > 0 {
			rule.NoncurrentVersionTransitions = make([]*s3.NoncurrentVersionTransition, 0, len(nc_transitions))
			for _, transition := range nc_transitions {
				transition := transition.(map[string]interface{})
				i := &s3.NoncurrentVersionTransition{}
				if val, ok := transition["days"].(int); ok && val > 0 {
					i.NoncurrentDays = aws.Int64(int64(val))
				}
				if val, ok := transition["storage_class"].(string); ok && val != "" {
					i.StorageClass = aws.String(val)
				}

				rule.NoncurrentVersionTransitions = append(rule.NoncurrentVersionTransitions, i)
			}
		}

		rules = append(rules, rule)
	}

	i := &s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String(bucket),
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: rules,
		},
	}

	err := resource.Retry(1*time.Minute, func() *resource.RetryError {
		if _, err := s3conn.PutBucketLifecycleConfiguration(i); err != nil {
			return resource.NonRetryableError(err)
		}
		return nil
	})
	if err != nil {
		return fmt.Errorf("Error putting S3 lifecycle: %s", err)
	}

	return nil
}
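
// Illustrative sketch (not executed by the resource): the minimal lifecycle
// input the function above builds for a single enabled expiration rule. The
// bucket name, rule ID, and prefix are hypothetical placeholders.
//
//	input := &s3.PutBucketLifecycleConfigurationInput{
//		Bucket: aws.String("example-bucket"),
//		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
//			Rules: []*s3.LifecycleRule{{
//				ID:         aws.String("tf-s3-lifecycle-example"),
//				Prefix:     aws.String("logs/"),
//				Status:     aws.String(s3.ExpirationStatusEnabled),
//				Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
//			}},
//		},
//	}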

func flattenAwsS3BucketReplicationConfiguration(r *s3.ReplicationConfiguration) []map[string]interface{} {
	replication_configuration := make([]map[string]interface{}, 0, 1)
	m := make(map[string]interface{})

	if r.Role != nil && *r.Role != "" {
		m["role"] = *r.Role
	}

	rules := make([]interface{}, 0, len(r.Rules))
	for _, v := range r.Rules {
		t := make(map[string]interface{})
		if v.Destination != nil {
			rd := make(map[string]interface{})
			if v.Destination.Bucket != nil {
				rd["bucket"] = *v.Destination.Bucket
			}
			if v.Destination.StorageClass != nil {
				rd["storage_class"] = *v.Destination.StorageClass
			}
			t["destination"] = schema.NewSet(destinationHash, []interface{}{rd})
		}

		if v.ID != nil {
			t["id"] = *v.ID
		}
		if v.Prefix != nil {
			t["prefix"] = *v.Prefix
		}
		if v.Status != nil {
			t["status"] = *v.Status
		}
		rules = append(rules, t)
	}
	m["rules"] = schema.NewSet(rulesHash, rules)

	replication_configuration = append(replication_configuration, m)

	return replication_configuration
}

func normalizeRoutingRules(w []*s3.RoutingRule) (string, error) {
	withNulls, err := json.Marshal(w)
	if err != nil {
		return "", err
	}

	var rules []map[string]interface{}
	if err := json.Unmarshal(withNulls, &rules); err != nil {
		return "", err
	}

	var cleanRules []map[string]interface{}
	for _, rule := range rules {
		cleanRules = append(cleanRules, removeNil(rule))
	}

	withoutNulls, err := json.Marshal(cleanRules)
	if err != nil {
		return "", err
	}

	return string(withoutNulls), nil
}
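
// Illustrative example (field set abbreviated, other null fields omitted for
// brevity): marshalling a rule whose optional Condition is unset produces a
// JSON null, which the unmarshal/removeNil/marshal round-trip above strips:
//
//	in:  [{"Condition":null,"Redirect":{"HostName":"example.com"}}]
//	out: [{"Redirect":{"HostName":"example.com"}}]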

func removeNil(data map[string]interface{}) map[string]interface{} {
	withoutNil := make(map[string]interface{})

	for k, v := range data {
		if v == nil {
			continue
		}

		switch v := v.(type) {
		case map[string]interface{}:
			withoutNil[k] = removeNil(v)
		default:
			withoutNil[k] = v
		}
	}

	return withoutNil
}
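
// Example: nils are dropped at any depth while other values pass through:
//
//	removeNil(map[string]interface{}{
//		"a": nil,
//		"b": map[string]interface{}{"c": nil, "d": 1},
//	})
//	// => map[string]interface{}{"b": map[string]interface{}{"d": 1}}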

// DEPRECATED. Please consider using `normalizeJsonString` function instead.
func normalizeJson(jsonString interface{}) string {
	if jsonString == nil || jsonString == "" {
		return ""
	}
	var j interface{}
	err := json.Unmarshal([]byte(jsonString.(string)), &j)
	if err != nil {
		return fmt.Sprintf("Error parsing JSON: %s", err)
	}
	b, _ := json.Marshal(j)
	return string(b)
}
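
// Example: encoding/json marshals map keys in sorted order and without
// insignificant whitespace, so semantically equal documents normalize to
// the same string:
//
//	normalizeJson(`{"b": 2, "a": 1}`) // => `{"a":1,"b":2}`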

func normalizeRegion(region string) string {
	// Default to us-east-1 if the bucket doesn't have a region:
	// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
	if region == "" {
		region = "us-east-1"
	}

	return region
}

func validateS3BucketAccelerationStatus(v interface{}, k string) (ws []string, errors []error) {
	validTypes := map[string]struct{}{
		"Enabled":   struct{}{},
		"Suspended": struct{}{},
	}

	if _, ok := validTypes[v.(string)]; !ok {
		errors = append(errors, fmt.Errorf("S3 Bucket Acceleration Status %q is invalid, must be %q or %q", v.(string), "Enabled", "Suspended"))
	}
	return
}

func validateS3BucketRequestPayerType(v interface{}, k string) (ws []string, errors []error) {
	value := v.(string)
	if value != s3.PayerRequester && value != s3.PayerBucketOwner {
		errors = append(errors, fmt.Errorf(
			"%q contains an invalid Request Payer type %q. Valid types are either %q or %q",
			k, value, s3.PayerRequester, s3.PayerBucketOwner))
	}
	return
}
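
// Usage sketch (hypothetical attribute name): s3.PayerRequester ("Requester")
// and s3.PayerBucketOwner ("BucketOwner") are the only accepted values.
//
//	_, errs := validateS3BucketRequestPayerType("Requester", "request_payer") // len(errs) == 0
//	_, errs = validateS3BucketRequestPayerType("Someone", "request_payer")    // len(errs) == 1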

// validateS3BucketName validates any S3 bucket name that is not inside the us-east-1 region.
// Buckets outside of this region have to be DNS-compliant. After the same restrictions are
// applied to buckets in the us-east-1 region, this function can be refactored as a SchemaValidateFunc
func validateS3BucketName(value string, region string) error {
	if region != "us-east-1" {
		if (len(value) < 3) || (len(value) > 63) {
			return fmt.Errorf("%q must contain from 3 to 63 characters", value)
		}
		if !regexp.MustCompile(`^[0-9a-z-.]+$`).MatchString(value) {
			return fmt.Errorf("only lowercase alphanumeric characters, hyphens, and periods allowed in %q", value)
		}
		if regexp.MustCompile(`^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$`).MatchString(value) {
			return fmt.Errorf("%q must not be formatted as an IP address", value)
		}
		if strings.HasPrefix(value, `.`) {
			return fmt.Errorf("%q cannot start with a period", value)
		}
		if strings.HasSuffix(value, `.`) {
			return fmt.Errorf("%q cannot end with a period", value)
		}
		if strings.Contains(value, `..`) {
			return fmt.Errorf("there can be only one period between labels in %q", value)
		}
	} else {
		if len(value) > 255 {
			return fmt.Errorf("%q must contain less than 256 characters", value)
		}
		if !regexp.MustCompile(`^[0-9a-zA-Z-._]+$`).MatchString(value) {
			return fmt.Errorf("only alphanumeric characters, hyphens, periods, and underscores allowed in %q", value)
		}
	}
	return nil
}
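
// Usage sketch (hypothetical names): outside us-east-1 the DNS-compliance
// rules apply; inside it, only the legacy length and character checks do.
//
//	validateS3BucketName("my-bucket", "eu-west-1") // nil
//	validateS3BucketName("My_Bucket", "eu-west-1") // error: not DNS-compliant
//	validateS3BucketName("My_Bucket", "us-east-1") // nil under the legacy rules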

func expirationHash(v interface{}) int {
	var buf bytes.Buffer
	m := v.(map[string]interface{})
	if v, ok := m["date"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}
	if v, ok := m["days"]; ok {
		buf.WriteString(fmt.Sprintf("%d-", v.(int)))
	}
	if v, ok := m["expired_object_delete_marker"]; ok {
		buf.WriteString(fmt.Sprintf("%t-", v.(bool)))
	}
	return hashcode.String(buf.String())
}

func transitionHash(v interface{}) int {
	var buf bytes.Buffer
	m := v.(map[string]interface{})
	if v, ok := m["date"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}
	if v, ok := m["days"]; ok {
		buf.WriteString(fmt.Sprintf("%d-", v.(int)))
	}
	if v, ok := m["storage_class"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}
	return hashcode.String(buf.String())
}

func rulesHash(v interface{}) int {
	var buf bytes.Buffer
	m := v.(map[string]interface{})

	if v, ok := m["id"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}
	if v, ok := m["prefix"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}
	if v, ok := m["status"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}
	return hashcode.String(buf.String())
}

func destinationHash(v interface{}) int {
	var buf bytes.Buffer
	m := v.(map[string]interface{})

	if v, ok := m["bucket"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}
	if v, ok := m["storage_class"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}
	return hashcode.String(buf.String())
}
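
// Illustrative note: these hash functions define set identity for the
// corresponding schema.Set attributes. For example (hypothetical values):
//
//	destinationHash(map[string]interface{}{
//		"bucket":        "arn:aws:s3:::dest",
//		"storage_class": "STANDARD",
//	})
//
// hashes the string "arn:aws:s3:::dest-STANDARD-", so two destination blocks
// with the same bucket and storage class collapse to one set element.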

type S3Website struct {
	Endpoint, Domain string
}