package aws

import (
	"bytes"
	"fmt"
	"io"
	"log"
	"os"
	"sort"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/hashicorp/terraform/helper/schema"
	"github.com/mitchellh/go-homedir"
)
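
// resourceAwsS3BucketObject defines the aws_s3_bucket_object resource, which
// manages a single object in an S3 bucket. The object body comes from either
// a local file ("source") or an inline string ("content"); the two are
// mutually exclusive. A minimal usage sketch (the bucket, key, and file path
// below are hypothetical, chosen only for illustration):
//
//	resource "aws_s3_bucket_object" "example" {
//	  bucket = "my-example-bucket"
//	  key    = "index.html"
//	  source = "files/index.html"
//	}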
func resourceAwsS3BucketObject() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsS3BucketObjectPut,
		Read:   resourceAwsS3BucketObjectRead,
		Update: resourceAwsS3BucketObjectPut,
		Delete: resourceAwsS3BucketObjectDelete,

		Schema: map[string]*schema.Schema{
			"bucket": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"acl": &schema.Schema{
				Type:         schema.TypeString,
				Default:      "private",
				Optional:     true,
				ValidateFunc: validateS3BucketObjectAclType,
			},

			"cache_control": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},

			"content_disposition": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},

			"content_encoding": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},

			"content_language": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},

			"content_type": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			"key": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"source": &schema.Schema{
				Type:          schema.TypeString,
				Optional:      true,
				ConflictsWith: []string{"content"},
			},

			"content": &schema.Schema{
				Type:          schema.TypeString,
				Optional:      true,
				ConflictsWith: []string{"source"},
			},

			"storage_class": &schema.Schema{
				Type:         schema.TypeString,
				Optional:     true,
				Computed:     true,
				ValidateFunc: validateS3BucketObjectStorageClassType,
			},

			"kms_key_id": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},

			"etag": &schema.Schema{
				Type: schema.TypeString,
				// This will conflict with SSE-C and SSE-KMS encryption and
				// multi-part uploads if/when those are implemented, because
				// the ETag S3 returns then no longer matches the MD5 digest
				// of the raw file.
				// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html
				Optional: true,
				Computed: true,
			},

			"version_id": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
		},
	}
}
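
// resourceAwsS3BucketObjectPut uploads the object with a single PutObject
// call, sourcing the body from the "source" file or the "content" string,
// and then reads it back to populate computed attributes such as etag and
// version_id.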
func resourceAwsS3BucketObjectPut(d *schema.ResourceData, meta interface{}) error {
	s3conn := meta.(*AWSClient).s3conn

	var body io.ReadSeeker

	if v, ok := d.GetOk("source"); ok {
		source := v.(string)
		path, err := homedir.Expand(source)
		if err != nil {
			return fmt.Errorf("Error expanding homedir in source (%s): %s", source, err)
		}
		file, err := os.Open(path)
		if err != nil {
			return fmt.Errorf("Error opening S3 bucket object source (%s): %s", source, err)
		}
		// Keep the file handle open until the upload completes.
		defer file.Close()

		body = file
	} else if v, ok := d.GetOk("content"); ok {
		content := v.(string)
		body = bytes.NewReader([]byte(content))
	} else {
		return fmt.Errorf("Must specify \"source\" or \"content\" field")
	}
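
	// With SSE-KMS the object is re-encrypted server-side, so the ETag S3
	// reports is no longer the MD5 of the plaintext; a user-supplied etag
	// could therefore never match, and the combination is rejected up front.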
	if _, ok := d.GetOk("kms_key_id"); ok {
		if _, ok := d.GetOk("etag"); ok {
			return fmt.Errorf("Unable to specify 'kms_key_id' and 'etag' together because 'etag' wouldn't equal the MD5 digest of the raw object data")
		}
	}
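
	// Build the base PutObject request; the optional attributes below are
	// attached only when they are set in configuration.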
	bucket := d.Get("bucket").(string)
	key := d.Get("key").(string)

	putInput := &s3.PutObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
		ACL:    aws.String(d.Get("acl").(string)),
		Body:   body,
	}

	if v, ok := d.GetOk("storage_class"); ok {
		putInput.StorageClass = aws.String(v.(string))
	}

	if v, ok := d.GetOk("cache_control"); ok {
		putInput.CacheControl = aws.String(v.(string))
	}

	if v, ok := d.GetOk("content_type"); ok {
		putInput.ContentType = aws.String(v.(string))
	}

	if v, ok := d.GetOk("content_encoding"); ok {
		putInput.ContentEncoding = aws.String(v.(string))
	}

	if v, ok := d.GetOk("content_language"); ok {
		putInput.ContentLanguage = aws.String(v.(string))
	}

	if v, ok := d.GetOk("content_disposition"); ok {
		putInput.ContentDisposition = aws.String(v.(string))
	}

	if v, ok := d.GetOk("kms_key_id"); ok {
		putInput.SSEKMSKeyId = aws.String(v.(string))
		putInput.ServerSideEncryption = aws.String("aws:kms")
	}

	resp, err := s3conn.PutObject(putInput)
	if err != nil {
		return fmt.Errorf("Error putting object in S3 bucket (%s): %s", bucket, err)
	}

	// S3 wraps the ETag value in double quotes, so strip them before storing.
	// See https://forums.aws.amazon.com/thread.jspa?threadID=44003
	d.Set("etag", strings.Trim(*resp.ETag, `"`))

	d.Set("version_id", resp.VersionId)
	d.SetId(key)
	return resourceAwsS3BucketObjectRead(d, meta)
}
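
// resourceAwsS3BucketObjectRead refreshes the object's attributes from a
// HeadObject call; the stored etag is sent as an If-Match precondition so a
// changed object surfaces as an error rather than as stale state.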
func resourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) error {
	s3conn := meta.(*AWSClient).s3conn

	bucket := d.Get("bucket").(string)
	key := d.Get("key").(string)
	etag := d.Get("etag").(string)

	resp, err := s3conn.HeadObject(
		&s3.HeadObjectInput{
			Bucket:  aws.String(bucket),
			Key:     aws.String(key),
			IfMatch: aws.String(etag),
		})
	if err != nil {
		// If S3 returns a 404 Request Failure, mark the object as destroyed
		if awsErr, ok := err.(awserr.RequestFailure); ok && awsErr.StatusCode() == 404 {
			d.SetId("")
			log.Printf("[WARN] S3 bucket object (%s) not found (HTTP 404), removing from state", key)
			return nil
		}
		return err
	}

	log.Printf("[DEBUG] Reading S3 Bucket Object meta: %s", resp)

	d.Set("cache_control", resp.CacheControl)
	d.Set("content_disposition", resp.ContentDisposition)
	d.Set("content_encoding", resp.ContentEncoding)
	d.Set("content_language", resp.ContentLanguage)
	d.Set("content_type", resp.ContentType)
	d.Set("version_id", resp.VersionId)
	d.Set("kms_key_id", resp.SSEKMSKeyId)

	// S3 omits the storage class from responses for objects in the default
	// "STANDARD" class, so assume STANDARD and override it when a class is
	// actually returned.
	d.Set("storage_class", s3.StorageClassStandard)
	if resp.StorageClass != nil {
		d.Set("storage_class", resp.StorageClass)
	}

	return nil
}
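
// resourceAwsS3BucketObjectDelete removes the object. When the state carries
// a version_id the bucket is versioned, so every stored version of the key is
// listed and deleted individually; otherwise a single DeleteObject suffices.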
func resourceAwsS3BucketObjectDelete(d *schema.ResourceData, meta interface{}) error {
	s3conn := meta.(*AWSClient).s3conn

	bucket := d.Get("bucket").(string)
	key := d.Get("key").(string)

	if _, ok := d.GetOk("version_id"); ok {
		// Bucket is versioned, we need to delete all versions
		vInput := s3.ListObjectVersionsInput{
			Bucket: aws.String(bucket),
			Prefix: aws.String(key),
		}
		out, err := s3conn.ListObjectVersions(&vInput)
		if err != nil {
			return fmt.Errorf("Failed listing S3 object versions: %s", err)
		}

		for _, v := range out.Versions {
			// The listing is by prefix, so skip versions that belong to
			// other keys that merely share this key as a prefix.
			if v.Key == nil || *v.Key != key {
				continue
			}
			input := s3.DeleteObjectInput{
				Bucket:    aws.String(bucket),
				Key:       aws.String(key),
				VersionId: v.VersionId,
			}
			_, err := s3conn.DeleteObject(&input)
			if err != nil {
				return fmt.Errorf("Error deleting S3 object version %s of %s: %s",
					aws.StringValue(v.VersionId), key, err)
			}
		}
	} else {
		// Just delete the object
		input := s3.DeleteObjectInput{
			Bucket: aws.String(bucket),
			Key:    aws.String(key),
		}
		_, err := s3conn.DeleteObject(&input)
		if err != nil {
			return fmt.Errorf("Error deleting S3 bucket object: %s", err)
		}
	}

	return nil
}
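
// validateS3BucketObjectAclType checks the "acl" argument against the canned
// ACLs that S3 accepts for objects.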
func validateS3BucketObjectAclType(v interface{}, k string) (ws []string, errors []error) {
	value := v.(string)

	cannedAcls := map[string]bool{
		s3.ObjectCannedACLPrivate:                true,
		s3.ObjectCannedACLPublicRead:             true,
		s3.ObjectCannedACLPublicReadWrite:        true,
		s3.ObjectCannedACLAuthenticatedRead:      true,
		s3.ObjectCannedACLAwsExecRead:            true,
		s3.ObjectCannedACLBucketOwnerRead:        true,
		s3.ObjectCannedACLBucketOwnerFullControl: true,
	}

	// sentenceJoin renders the map's keys as a sorted, quoted,
	// comma-separated list ending in "or", e.g. `"a", "b", or "c"`.
	sentenceJoin := func(m map[string]bool) string {
		keys := make([]string, 0, len(m))
		for k := range m {
			keys = append(keys, fmt.Sprintf("%q", k))
		}
		sort.Strings(keys)

		keys[len(keys)-1] = fmt.Sprintf("or %s", keys[len(keys)-1])
		return strings.Join(keys, ", ")
	}

	if _, ok := cannedAcls[value]; !ok {
		errors = append(errors, fmt.Errorf(
			"%q contains an invalid canned ACL type %q. Valid types are %s",
			k, value, sentenceJoin(cannedAcls)))
	}
	return
}
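
// validateS3BucketObjectStorageClassType checks the "storage_class" argument
// against the storage classes this resource supports.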
func validateS3BucketObjectStorageClassType(v interface{}, k string) (ws []string, errors []error) {
	value := v.(string)

	storageClass := map[string]bool{
		s3.StorageClassStandard:          true,
		s3.StorageClassReducedRedundancy: true,
		s3.StorageClassStandardIa:        true,
	}

	if _, ok := storageClass[value]; !ok {
		errors = append(errors, fmt.Errorf(
			"%q contains an invalid Storage Class type %q. Valid types are %q, %q, or %q",
			k, value, s3.StorageClassStandard, s3.StorageClassReducedRedundancy,
			s3.StorageClassStandardIa))
	}
	return
}