// resource_aws_s3_bucket.go implements the aws_s3_bucket resource for the
// Terraform AWS provider.
package aws
import (
"encoding/json"
"fmt"
"log"
"github.com/hashicorp/terraform/helper/schema"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/s3"
)
func resourceAwsS3Bucket() *schema.Resource {
return &schema.Resource{
Create: resourceAwsS3BucketCreate,
Read: resourceAwsS3BucketRead,
2015-03-26 15:17:27 +01:00
Update: resourceAwsS3BucketUpdate,
Delete: resourceAwsS3BucketDelete,
Schema: map[string]*schema.Schema{
"bucket": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"acl": &schema.Schema{
Type: schema.TypeString,
Default: "private",
Optional: true,
ForceNew: true,
},
2015-03-26 15:17:27 +01:00
"policy": &schema.Schema{
Type: schema.TypeString,
Optional: true,
StateFunc: normalizeJson,
},
"website": &schema.Schema{
Type: schema.TypeList,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"index_document": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"error_document": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"redirect_all_requests_to": &schema.Schema{
Type: schema.TypeString,
ConflictsWith: []string{
"website.0.index_document",
"website.0.error_document",
},
Optional: true,
},
},
},
},
"hosted_zone_id": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
2015-05-07 18:09:19 +02:00
"region": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"website_endpoint": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
2015-03-26 15:17:27 +01:00
"tags": tagsSchema(),
added force_destroy argument to s3 bucket provider commit a92fe29b909af033c4c57257ddcb6793bfb694aa Author: Michael Austin <m_austin@me.com> Date: Wed May 20 16:35:38 2015 -0400 updated to new style of awserr commit 428271c9b9ca01ed2add1ffa608ab354f520bfa0 Merge: b3bae0e 883e284 Author: Michael Austin <m_austin@me.com> Date: Wed May 20 16:29:00 2015 -0400 Merge branch 'master' into 2544-terraform-s3-forceDelete commit b3bae0efdac81adf8bb448d11cc1ca62eae75d94 Author: Michael Austin <m_austin@me.com> Date: Wed May 20 12:06:36 2015 -0400 removed extra line commit 85eb40fc7ce24f5eb01af10eadde35ebac3c8223 Author: Michael Austin <m_austin@me.com> Date: Tue May 19 14:27:19 2015 -0400 stray [ commit d8a405f7d6880c350ab9fccb70b833d2239d9915 Author: Michael Austin <m_austin@me.com> Date: Tue May 19 14:24:01 2015 -0400 addressed feedback concerning parsing of aws error in a more standard way commit 5b9a5ee613af78e466d89ba772959bb38566f50e Author: Michael Austin <m_austin@me.com> Date: Tue May 19 10:55:22 2015 -0400 clarify comment to highlight recursion commit 91043781f4ba08b075673cd4c7c01792975c2402 Author: Michael Austin <m_austin@me.com> Date: Tue May 19 10:51:13 2015 -0400 addressed feedback about reusing err variable and unneeded parens commit 95e9c3afbd34d4d09a6355b0aaeb52606917b6dc Merge: 2637edf db095e2 Author: Michael Austin <m_austin@me.com> Date: Mon May 18 19:15:36 2015 -0400 Merge branch 'master' into 2544-terraform-s3-forceDelete commit 2637edfc48a23b2951032b1e974d7097602c4715 Author: Michael Austin <m_austin@me.com> Date: Fri May 15 15:12:41 2015 -0400 optimize delete to delete up to 1000 at once instead of one at a time commit 1441eb2ccf13fa34f4d8c43257c2e471108738e4 Author: Michael Austin <m_austin@me.com> Date: Fri May 15 12:34:53 2015 -0400 Revert "hook new resource provider into configuration" This reverts commit e14a1ade5315e3276e039b745a40ce69a64518b5. 
commit b532fa22022e34e4a8ea09024874bb0e8265f3ac Author: Michael Austin <m_austin@me.com> Date: Fri May 15 12:34:49 2015 -0400 this file should not be in this branch commit 645c0b66c6f000a6da50ebeca1d867a63e5fd9f1 Author: Michael Austin <m_austin@me.com> Date: Thu May 14 21:15:29 2015 -0400 buckets tagged force_destroy will delete all files and then delete buckets commit ac50cae214ce88e22bb1184386c56b8ba8c057f7 Author: Michael Austin <m_austin@me.com> Date: Thu May 14 12:41:40 2015 -0400 added code to delete policy from s3 bucket commit cd45e45d6d04a3956fe35c178d5e816ba18d1051 Author: Michael Austin <m_austin@me.com> Date: Thu May 14 12:27:13 2015 -0400 added code to read bucket policy from bucket, however, it's not working as expected currently commit 0d3d51abfddec9c39c60d8f7b81e8fcd88e117b9 Merge: 31ffdea 8a3b75d Author: Michael Austin <m_austin@me.com> Date: Thu May 14 08:38:06 2015 -0400 Merge remote-tracking branch 'hashi_origin/master' into 2544-terraform-s3-policy commit 31ffdea96ba3d5ddf5d42f862e68c1c133e49925 Author: Michael Austin <m_austin@me.com> Date: Wed May 13 16:01:52 2015 -0400 add name for use with resouce id commit b41c7375dbd9ae43ee0d421cf2432c1eb174b5b0 Author: Michael Austin <m_austin@me.com> Date: Wed May 13 14:48:24 2015 -0400 Revert "working policy assignment" This reverts commit 0975a70c37eaa310d2bdfe6f77009253c5e450c7. 
commit b926b11521878f1527bdcaba3c1b7c0b973e89e5 Author: Michael Austin <m_austin@me.com> Date: Wed May 13 14:35:02 2015 -0400 moved policy to it's own provider commit 233a5f443c13d71f3ddc06cf034d07cb8231b4dd Merge: e14a1ad c003e96 Author: Michael Austin <m_austin@me.com> Date: Wed May 13 12:39:14 2015 -0400 merged origin/master commit e14a1ade5315e3276e039b745a40ce69a64518b5 Author: Michael Austin <m_austin@me.com> Date: Wed May 13 12:26:51 2015 -0400 hook new resource provider into configuration commit 455b409cb853faae3e45a0a3d4e2859ffc4ed865 Author: Michael Austin <m_austin@me.com> Date: Wed May 13 12:26:15 2015 -0400 dummy resource provider commit 0975a70c37eaa310d2bdfe6f77009253c5e450c7 Author: Michael Austin <m_austin@me.com> Date: Wed May 13 09:42:31 2015 -0400 working policy assignment commit 3ab901d6b3ab605adc0a8cb703aa047a513b68d4 Author: Michael Austin <m_austin@me.com> Date: Tue May 12 10:39:56 2015 -0400 added policy string to schema
2015-05-21 01:06:27 +02:00
"force_destroy": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Default: false,
},
},
}
}
func resourceAwsS3BucketCreate(d *schema.ResourceData, meta interface{}) error {
s3conn := meta.(*AWSClient).s3conn
awsRegion := meta.(*AWSClient).region
// Get the bucket and acl
bucket := d.Get("bucket").(string)
acl := d.Get("acl").(string)
log.Printf("[DEBUG] S3 bucket create: %s, ACL: %s", bucket, acl)
req := &s3.CreateBucketInput{
Bucket: aws.String(bucket),
ACL: aws.String(acl),
}
// Special case us-east-1 region and do not set the LocationConstraint.
// See "Request Elements: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUT.html
if awsRegion != "us-east-1" {
req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
LocationConstraint: aws.String(awsRegion),
}
}
_, err := s3conn.CreateBucket(req)
if err != nil {
return fmt.Errorf("Error creating S3 bucket: %s", err)
}
// Assign the bucket name as the resource ID
d.SetId(bucket)
2015-03-26 15:17:27 +01:00
return resourceAwsS3BucketUpdate(d, meta)
}
func resourceAwsS3BucketUpdate(d *schema.ResourceData, meta interface{}) error {
s3conn := meta.(*AWSClient).s3conn
if err := setTagsS3(s3conn, d); err != nil {
return err
}
if d.HasChange("policy") {
if err := resourceAwsS3BucketPolicyUpdate(s3conn, d); err != nil {
return err
}
}
if d.HasChange("website") {
if err := resourceAwsS3BucketWebsiteUpdate(s3conn, d); err != nil {
return err
}
}
2015-03-26 15:17:27 +01:00
return resourceAwsS3BucketRead(d, meta)
}
func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error {
s3conn := meta.(*AWSClient).s3conn
var err error
_, err = s3conn.HeadBucket(&s3.HeadBucketInput{
Bucket: aws.String(d.Id()),
})
if err != nil {
if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() == 404 {
d.SetId("")
} else {
// some of the AWS SDK's errors can be empty strings, so let's add
// some additional context.
return fmt.Errorf("error reading S3 bucket \"%s\": %s", d.Id(), err)
}
}
// Read the policy
pol, err := s3conn.GetBucketPolicy(&s3.GetBucketPolicyInput{
Bucket: aws.String(d.Id()),
})
log.Printf("[DEBUG] S3 bucket: %s, read policy: %v", d.Id(), pol)
if err != nil {
if err := d.Set("policy", ""); err != nil {
return err
}
} else {
if v := pol.Policy; v == nil {
if err := d.Set("policy", ""); err != nil {
return err
}
} else if err := d.Set("policy", normalizeJson(*v)); err != nil {
return err
}
}
2015-05-06 14:12:40 +02:00
// Read the website configuration
ws, err := s3conn.GetBucketWebsite(&s3.GetBucketWebsiteInput{
Bucket: aws.String(d.Id()),
})
var websites []map[string]interface{}
if err == nil {
w := make(map[string]interface{})
if v := ws.IndexDocument; v != nil {
w["index_document"] = *v.Suffix
}
if v := ws.ErrorDocument; v != nil {
w["error_document"] = *v.Key
}
if v := ws.RedirectAllRequestsTo; v != nil {
w["redirect_all_requests_to"] = *v.HostName
}
websites = append(websites, w)
2015-05-06 14:12:40 +02:00
}
if err := d.Set("website", websites); err != nil {
return err
}
2015-05-07 18:09:19 +02:00
// Add the region as an attribute
location, err := s3conn.GetBucketLocation(
&s3.GetBucketLocationInput{
Bucket: aws.String(d.Id()),
},
)
if err != nil {
return err
}
var region string
if location.LocationConstraint != nil {
region = *location.LocationConstraint
}
2015-05-08 16:29:47 +02:00
region = normalizeRegion(region)
2015-05-07 18:09:19 +02:00
if err := d.Set("region", region); err != nil {
return err
}
// Add the hosted zone ID for this bucket's region as an attribute
hostedZoneID := HostedZoneIDForRegion(region)
if err := d.Set("hosted_zone_id", hostedZoneID); err != nil {
return err
}
// Add website_endpoint as an attribute
endpoint, err := websiteEndpoint(s3conn, d)
if err != nil {
return err
}
if err := d.Set("website_endpoint", endpoint); err != nil {
return err
}
tagSet, err := getTagSetS3(s3conn, d.Id())
if err != nil {
return err
}
if err := d.Set("tags", tagsToMapS3(tagSet)); err != nil {
return err
}
return nil
}
func resourceAwsS3BucketDelete(d *schema.ResourceData, meta interface{}) error {
s3conn := meta.(*AWSClient).s3conn
log.Printf("[DEBUG] S3 Delete Bucket: %s", d.Id())
_, err := s3conn.DeleteBucket(&s3.DeleteBucketInput{
Bucket: aws.String(d.Id()),
})
if err != nil {
added force_destroy argument to s3 bucket provider commit a92fe29b909af033c4c57257ddcb6793bfb694aa Author: Michael Austin <m_austin@me.com> Date: Wed May 20 16:35:38 2015 -0400 updated to new style of awserr commit 428271c9b9ca01ed2add1ffa608ab354f520bfa0 Merge: b3bae0e 883e284 Author: Michael Austin <m_austin@me.com> Date: Wed May 20 16:29:00 2015 -0400 Merge branch 'master' into 2544-terraform-s3-forceDelete commit b3bae0efdac81adf8bb448d11cc1ca62eae75d94 Author: Michael Austin <m_austin@me.com> Date: Wed May 20 12:06:36 2015 -0400 removed extra line commit 85eb40fc7ce24f5eb01af10eadde35ebac3c8223 Author: Michael Austin <m_austin@me.com> Date: Tue May 19 14:27:19 2015 -0400 stray [ commit d8a405f7d6880c350ab9fccb70b833d2239d9915 Author: Michael Austin <m_austin@me.com> Date: Tue May 19 14:24:01 2015 -0400 addressed feedback concerning parsing of aws error in a more standard way commit 5b9a5ee613af78e466d89ba772959bb38566f50e Author: Michael Austin <m_austin@me.com> Date: Tue May 19 10:55:22 2015 -0400 clarify comment to highlight recursion commit 91043781f4ba08b075673cd4c7c01792975c2402 Author: Michael Austin <m_austin@me.com> Date: Tue May 19 10:51:13 2015 -0400 addressed feedback about reusing err variable and unneeded parens commit 95e9c3afbd34d4d09a6355b0aaeb52606917b6dc Merge: 2637edf db095e2 Author: Michael Austin <m_austin@me.com> Date: Mon May 18 19:15:36 2015 -0400 Merge branch 'master' into 2544-terraform-s3-forceDelete commit 2637edfc48a23b2951032b1e974d7097602c4715 Author: Michael Austin <m_austin@me.com> Date: Fri May 15 15:12:41 2015 -0400 optimize delete to delete up to 1000 at once instead of one at a time commit 1441eb2ccf13fa34f4d8c43257c2e471108738e4 Author: Michael Austin <m_austin@me.com> Date: Fri May 15 12:34:53 2015 -0400 Revert "hook new resource provider into configuration" This reverts commit e14a1ade5315e3276e039b745a40ce69a64518b5. 
commit b532fa22022e34e4a8ea09024874bb0e8265f3ac Author: Michael Austin <m_austin@me.com> Date: Fri May 15 12:34:49 2015 -0400 this file should not be in this branch commit 645c0b66c6f000a6da50ebeca1d867a63e5fd9f1 Author: Michael Austin <m_austin@me.com> Date: Thu May 14 21:15:29 2015 -0400 buckets tagged force_destroy will delete all files and then delete buckets commit ac50cae214ce88e22bb1184386c56b8ba8c057f7 Author: Michael Austin <m_austin@me.com> Date: Thu May 14 12:41:40 2015 -0400 added code to delete policy from s3 bucket commit cd45e45d6d04a3956fe35c178d5e816ba18d1051 Author: Michael Austin <m_austin@me.com> Date: Thu May 14 12:27:13 2015 -0400 added code to read bucket policy from bucket, however, it's not working as expected currently commit 0d3d51abfddec9c39c60d8f7b81e8fcd88e117b9 Merge: 31ffdea 8a3b75d Author: Michael Austin <m_austin@me.com> Date: Thu May 14 08:38:06 2015 -0400 Merge remote-tracking branch 'hashi_origin/master' into 2544-terraform-s3-policy commit 31ffdea96ba3d5ddf5d42f862e68c1c133e49925 Author: Michael Austin <m_austin@me.com> Date: Wed May 13 16:01:52 2015 -0400 add name for use with resouce id commit b41c7375dbd9ae43ee0d421cf2432c1eb174b5b0 Author: Michael Austin <m_austin@me.com> Date: Wed May 13 14:48:24 2015 -0400 Revert "working policy assignment" This reverts commit 0975a70c37eaa310d2bdfe6f77009253c5e450c7. 
commit b926b11521878f1527bdcaba3c1b7c0b973e89e5 Author: Michael Austin <m_austin@me.com> Date: Wed May 13 14:35:02 2015 -0400 moved policy to it's own provider commit 233a5f443c13d71f3ddc06cf034d07cb8231b4dd Merge: e14a1ad c003e96 Author: Michael Austin <m_austin@me.com> Date: Wed May 13 12:39:14 2015 -0400 merged origin/master commit e14a1ade5315e3276e039b745a40ce69a64518b5 Author: Michael Austin <m_austin@me.com> Date: Wed May 13 12:26:51 2015 -0400 hook new resource provider into configuration commit 455b409cb853faae3e45a0a3d4e2859ffc4ed865 Author: Michael Austin <m_austin@me.com> Date: Wed May 13 12:26:15 2015 -0400 dummy resource provider commit 0975a70c37eaa310d2bdfe6f77009253c5e450c7 Author: Michael Austin <m_austin@me.com> Date: Wed May 13 09:42:31 2015 -0400 working policy assignment commit 3ab901d6b3ab605adc0a8cb703aa047a513b68d4 Author: Michael Austin <m_austin@me.com> Date: Tue May 12 10:39:56 2015 -0400 added policy string to schema
2015-05-21 01:06:27 +02:00
ec2err, ok := err.(awserr.Error)
if ok && ec2err.Code() == "BucketNotEmpty" {
if d.Get("force_destroy").(bool) {
// bucket may have things delete them
log.Printf("[DEBUG] S3 Bucket attempting to forceDestroy %+v", err)
bucket := d.Get("bucket").(string)
resp, err := s3conn.ListObjects(
&s3.ListObjectsInput{
Bucket: aws.String(bucket),
},
)
if err != nil {
return fmt.Errorf("Error S3 Bucket list Objects err: %s", err)
}
objectsToDelete := make([]*s3.ObjectIdentifier, len(resp.Contents))
for i, v := range resp.Contents {
objectsToDelete[i] = &s3.ObjectIdentifier{
Key: v.Key,
}
}
_, err = s3conn.DeleteObjects(
&s3.DeleteObjectsInput{
Bucket: aws.String(bucket),
Delete: &s3.Delete{
Objects: objectsToDelete,
},
},
)
if err != nil {
return fmt.Errorf("Error S3 Bucket force_destroy error deleting: %s", err)
}
// this line recurses until all objects are deleted or an error is returned
return resourceAwsS3BucketDelete(d, meta)
}
}
return fmt.Errorf("Error deleting S3 Bucket: %s", err)
}
return nil
}
// resourceAwsS3BucketPolicyUpdate writes the configured bucket policy to S3,
// or deletes the bucket policy when the configured policy is empty.
func resourceAwsS3BucketPolicyUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	name := d.Get("bucket").(string)
	doc := d.Get("policy").(string)

	// An empty policy string means the policy should be removed.
	if doc == "" {
		log.Printf("[DEBUG] S3 bucket: %s, delete policy: %s", name, doc)
		_, err := s3conn.DeleteBucketPolicy(&s3.DeleteBucketPolicyInput{
			Bucket: aws.String(name),
		})
		if err != nil {
			return fmt.Errorf("Error deleting S3 policy: %s", err)
		}
		return nil
	}

	log.Printf("[DEBUG] S3 bucket: %s, put policy: %s", name, doc)
	_, err := s3conn.PutBucketPolicy(&s3.PutBucketPolicyInput{
		Bucket: aws.String(name),
		Policy: aws.String(doc),
	})
	if err != nil {
		return fmt.Errorf("Error putting S3 policy: %s", err)
	}
	return nil
}
// resourceAwsS3BucketWebsiteUpdate applies the "website" block: exactly one
// entry puts a website configuration, none deletes it, and more than one
// is a configuration error.
func resourceAwsS3BucketWebsiteUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	configs := d.Get("website").([]interface{})

	switch len(configs) {
	case 0:
		return resourceAwsS3BucketWebsiteDelete(s3conn, d)
	case 1:
		return resourceAwsS3BucketWebsitePut(s3conn, d, configs[0].(map[string]interface{}))
	default:
		return fmt.Errorf("Cannot specify more than one website.")
	}
}
func resourceAwsS3BucketWebsitePut(s3conn *s3.S3, d *schema.ResourceData, website map[string]interface{}) error {
bucket := d.Get("bucket").(string)
indexDocument := website["index_document"].(string)
errorDocument := website["error_document"].(string)
redirectAllRequestsTo := website["redirect_all_requests_to"].(string)
if indexDocument == "" && redirectAllRequestsTo == "" {
return fmt.Errorf("Must specify either index_document or redirect_all_requests_to.")
}
websiteConfiguration := &s3.WebsiteConfiguration{}
if indexDocument != "" {
websiteConfiguration.IndexDocument = &s3.IndexDocument{Suffix: aws.String(indexDocument)}
}
if errorDocument != "" {
websiteConfiguration.ErrorDocument = &s3.ErrorDocument{Key: aws.String(errorDocument)}
}
if redirectAllRequestsTo != "" {
websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{HostName: aws.String(redirectAllRequestsTo)}
}
putInput := &s3.PutBucketWebsiteInput{
Bucket: aws.String(bucket),
WebsiteConfiguration: websiteConfiguration,
}
2015-05-07 17:03:28 +02:00
log.Printf("[DEBUG] S3 put bucket website: %#v", putInput)
_, err := s3conn.PutBucketWebsite(putInput)
if err != nil {
return fmt.Errorf("Error putting S3 website: %s", err)
}
return nil
}
func resourceAwsS3BucketWebsiteDelete(s3conn *s3.S3, d *schema.ResourceData) error {
bucket := d.Get("bucket").(string)
deleteInput := &s3.DeleteBucketWebsiteInput{Bucket: aws.String(bucket)}
2015-05-07 17:03:28 +02:00
log.Printf("[DEBUG] S3 delete bucket website: %#v", deleteInput)
_, err := s3conn.DeleteBucketWebsite(deleteInput)
if err != nil {
return fmt.Errorf("Error deleting S3 website: %s", err)
}
return nil
}
// websiteEndpoint computes the S3 static-website endpoint for this bucket,
// or returns an empty string when no website configuration is in state.
func websiteEndpoint(s3conn *s3.S3, d *schema.ResourceData) (string, error) {
	// If the bucket doesn't have a website configuration, return an empty
	// endpoint
	if _, ok := d.GetOk("website"); !ok {
		return "", nil
	}

	name := d.Get("bucket").(string)

	// Lookup the region for this bucket
	location, err := s3conn.GetBucketLocation(&s3.GetBucketLocationInput{
		Bucket: aws.String(name),
	})
	if err != nil {
		return "", err
	}

	bucketRegion := ""
	if lc := location.LocationConstraint; lc != nil {
		bucketRegion = *lc
	}

	return WebsiteEndpointUrl(name, bucketRegion), nil
}
func WebsiteEndpointUrl(bucket string, region string) string {
2015-05-08 16:29:47 +02:00
region = normalizeRegion(region)
return fmt.Sprintf("%s.s3-website-%s.amazonaws.com", bucket, region)
}
// normalizeJson re-serializes a JSON document into a canonical compact form
// (Go maps marshal with sorted keys) so equivalent policies compare equal in
// state. A nil input yields ""; unparsable input yields a descriptive
// string, since the StateFunc signature cannot return an error.
func normalizeJson(jsonString interface{}) string {
	if jsonString == nil {
		return ""
	}

	var parsed map[string]interface{}
	if err := json.Unmarshal([]byte(jsonString.(string)), &parsed); err != nil {
		return fmt.Sprintf("Error parsing JSON: %s", err)
	}

	normalized, _ := json.Marshal(parsed)
	return string(normalized)
}
// normalizeRegion maps an empty GetBucketLocation constraint to the
// canonical region name it actually denotes.
func normalizeRegion(region string) string {
	// Default to us-east-1 if the bucket doesn't have a region:
	// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
	if region == "" {
		region = "us-east-1"
	}

	return region
}