Add ability to set Storage Class in aws_s3_bucket_object. (#8174)
An S3 Bucket owner may wish to select a different underlying storage class for an object. This commit adds an optional "storage_class" attribute to the aws_s3_bucket_object resource so that the owner of the S3 bucket can specify an appropriate storage class to use when creating an object.

Signed-off-by: Krzysztof Wilczynski <krzysztof.wilczynski@linux.com>
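As a usage sketch (illustrative only; the bucket name, key, and content below are placeholders, not taken from this commit), the new attribute sits alongside the existing arguments:

resource "aws_s3_bucket" "example" {
  bucket = "example-bucket"
}

resource "aws_s3_bucket_object" "example" {
  bucket  = "${aws_s3_bucket.example.bucket}"
  key     = "example-key"
  content = "example content"

  # One of STANDARD, REDUCED_REDUNDANCY, or STANDARD_IA,
  # per the validator added in this commit.
  storage_class = "REDUCED_REDUNDANCY"
}

Because the schema marks the attribute both Optional and Computed, omitting storage_class leaves the object in S3's default "STANDARD" class, and the resource reads that value back into state.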
parent b02dacfb7e
commit e943851429
@@ -155,10 +155,16 @@ func dataSourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) e
 	d.Set("metadata", pointersMapToStringList(out.Metadata))
 	d.Set("server_side_encryption", out.ServerSideEncryption)
 	d.Set("sse_kms_key_id", out.SSEKMSKeyId)
-	d.Set("storage_class", out.StorageClass)
 	d.Set("version_id", out.VersionId)
 	d.Set("website_redirect_location", out.WebsiteRedirectLocation)
 
+	// The "STANDARD" (which is also the default) storage
+	// class when set would not be included in the results.
+	d.Set("storage_class", s3.StorageClassStandard)
+	if out.StorageClass != nil {
+		d.Set("storage_class", out.StorageClass)
+	}
+
 	if isContentTypeAllowed(out.ContentType) {
 		input := s3.GetObjectInput{
 			Bucket: aws.String(bucket),
@@ -154,12 +154,12 @@ func TestAccDataSourceAWSS3BucketObject_allParams(t *testing.T) {
 					resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "server_side_encryption", ""),
 					resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "sse_kms_key_id", ""),
-					// Supported, but difficult to reproduce in short testing time
-					resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "storage_class", ""),
+					resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "storage_class", "STANDARD"),
 					resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "expiration", ""),
 					// Currently unsupported in aws_s3_bucket_object resource
 					resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "expires", ""),
 					resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "website_redirect_location", ""),
-					resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "metadata.#", "0"),
+					resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "metadata.%", "0"),
 				),
 			},
 		},
@@ -82,6 +82,13 @@ func resourceAwsS3BucketObject() *schema.Resource {
 				ConflictsWith: []string{"source"},
 			},
 
+			"storage_class": &schema.Schema{
+				Type:         schema.TypeString,
+				Optional:     true,
+				Computed:     true,
+				ValidateFunc: validateS3BucketObjectStorageClassType,
+			},
+
 			"kms_key_id": &schema.Schema{
 				Type:     schema.TypeString,
 				Optional: true,
@@ -107,9 +114,6 @@ func resourceAwsS3BucketObject() *schema.Resource {
 func resourceAwsS3BucketObjectPut(d *schema.ResourceData, meta interface{}) error {
 	s3conn := meta.(*AWSClient).s3conn
 
-	bucket := d.Get("bucket").(string)
-	key := d.Get("key").(string)
-	acl := d.Get("acl").(string)
 	var body io.ReadSeeker
 
 	if v, ok := d.GetOk("source"); ok {
@@ -137,13 +141,20 @@ func resourceAwsS3BucketObjectPut(d *schema.ResourceData, meta interface{}) erro
 		}
 	}
 
+	bucket := d.Get("bucket").(string)
+	key := d.Get("key").(string)
+
 	putInput := &s3.PutObjectInput{
 		Bucket: aws.String(bucket),
 		Key:    aws.String(key),
-		ACL:    aws.String(acl),
+		ACL:    aws.String(d.Get("acl").(string)),
 		Body:   body,
 	}
 
+	if v, ok := d.GetOk("storage_class"); ok {
+		putInput.StorageClass = aws.String(v.(string))
+	}
+
 	if v, ok := d.GetOk("cache_control"); ok {
 		putInput.CacheControl = aws.String(v.(string))
 	}
@@ -205,6 +216,7 @@ func resourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) err
 		}
 		return err
 	}
+	log.Printf("[DEBUG] Reading S3 Bucket Object meta: %s", resp)
 
 	d.Set("cache_control", resp.CacheControl)
 	d.Set("content_disposition", resp.ContentDisposition)
@@ -214,7 +226,13 @@ func resourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) err
 	d.Set("version_id", resp.VersionId)
 	d.Set("kms_key_id", resp.SSEKMSKeyId)
 
-	log.Printf("[DEBUG] Reading S3 Bucket Object meta: %s", resp)
+	// The "STANDARD" (which is also the default) storage
+	// class when set would not be included in the results.
+	d.Set("storage_class", s3.StorageClassStandard)
+	if resp.StorageClass != nil {
+		d.Set("storage_class", resp.StorageClass)
+	}
+
 	return nil
 }
@@ -297,3 +315,21 @@ func validateS3BucketObjectAclType(v interface{}, k string) (ws []string, errors
 	}
 	return
 }
+
+func validateS3BucketObjectStorageClassType(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+
+	storageClass := map[string]bool{
+		s3.StorageClassStandard:          true,
+		s3.StorageClassReducedRedundancy: true,
+		s3.StorageClassStandardIa:        true,
+	}
+
+	if _, ok := storageClass[value]; !ok {
+		errors = append(errors, fmt.Errorf(
+			"%q contains an invalid Storage Class type %q. Valid types are either %q, %q, or %q",
+			k, value, s3.StorageClassStandard, s3.StorageClassReducedRedundancy,
+			s3.StorageClassStandardIa))
+	}
+	return
+}
@@ -309,6 +309,34 @@ func TestAccAWSS3BucketObject_acl(t *testing.T) {
 	})
 }
 
+func testAccCheckAWSS3BucketObjectAcl(n string, expectedPerms []string) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		rs, _ := s.RootModule().Resources[n]
+		s3conn := testAccProvider.Meta().(*AWSClient).s3conn
+
+		out, err := s3conn.GetObjectAcl(&s3.GetObjectAclInput{
+			Bucket: aws.String(rs.Primary.Attributes["bucket"]),
+			Key:    aws.String(rs.Primary.Attributes["key"]),
+		})
+
+		if err != nil {
+			return fmt.Errorf("GetObjectAcl error: %v", err)
+		}
+
+		var perms []string
+		for _, v := range out.Grants {
+			perms = append(perms, *v.Permission)
+		}
+		sort.Strings(perms)
+
+		if !reflect.DeepEqual(perms, expectedPerms) {
+			return fmt.Errorf("Expected ACL permissions to be %v, got %v", expectedPerms, perms)
+		}
+
+		return nil
+	}
+}
+
 func TestResourceAWSS3BucketObjectAcl_validation(t *testing.T) {
 	_, errors := validateS3BucketObjectAclType("incorrect", "acl")
 	if len(errors) == 0 {
@@ -337,28 +365,102 @@ func TestResourceAWSS3BucketObjectAcl_validation(t *testing.T) {
 	}
 }
 
-func testAccCheckAWSS3BucketObjectAcl(n string, expectedPerms []string) resource.TestCheckFunc {
+func TestAccAWSS3BucketObject_storageClass(t *testing.T) {
+	rInt := acctest.RandInt()
+	var obj s3.GetObjectOutput
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSS3BucketObjectDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				PreConfig: func() {},
+				Config:    testAccAWSS3BucketObjectConfigContent(rInt),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSS3BucketObjectExists(
+						"aws_s3_bucket_object.object",
+						&obj),
+					resource.TestCheckResourceAttr(
+						"aws_s3_bucket_object.object",
+						"storage_class",
+						"STANDARD"),
+					testAccCheckAWSS3BucketObjectStorageClass(
+						"aws_s3_bucket_object.object",
+						"STANDARD"),
+				),
+			},
+			resource.TestStep{
+				Config: testAccAWSS3BucketObjectConfig_storageClass(rInt, "REDUCED_REDUNDANCY"),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSS3BucketObjectExists(
+						"aws_s3_bucket_object.object",
+						&obj),
+					resource.TestCheckResourceAttr(
+						"aws_s3_bucket_object.object",
+						"storage_class",
+						"REDUCED_REDUNDANCY"),
+					testAccCheckAWSS3BucketObjectStorageClass(
+						"aws_s3_bucket_object.object",
+						"REDUCED_REDUNDANCY"),
+				),
+			},
+		},
+	})
+}
+
+func TestResourceAWSS3BucketObjectStorageClass_validation(t *testing.T) {
+	_, errors := validateS3BucketObjectStorageClassType("incorrect", "storage_class")
+	if len(errors) == 0 {
+		t.Fatalf("Expected to trigger a validation error")
+	}
+
+	var testCases = []struct {
+		Value    string
+		ErrCount int
+	}{
+		{
+			Value:    "STANDARD",
+			ErrCount: 0,
+		},
+		{
+			Value:    "REDUCED_REDUNDANCY",
+			ErrCount: 0,
+		},
+	}
+
+	for _, tc := range testCases {
+		_, errors := validateS3BucketObjectStorageClassType(tc.Value, "storage_class")
+		if len(errors) != tc.ErrCount {
+			t.Fatalf("Expected not to trigger a validation error")
+		}
+	}
+}
+
+func testAccCheckAWSS3BucketObjectStorageClass(n, expectedClass string) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
 		rs, _ := s.RootModule().Resources[n]
 		s3conn := testAccProvider.Meta().(*AWSClient).s3conn
 
-		out, err := s3conn.GetObjectAcl(&s3.GetObjectAclInput{
+		out, err := s3conn.HeadObject(&s3.HeadObjectInput{
 			Bucket: aws.String(rs.Primary.Attributes["bucket"]),
 			Key:    aws.String(rs.Primary.Attributes["key"]),
 		})
 
 		if err != nil {
-			return fmt.Errorf("GetObjectAcl error: %v", err)
+			return fmt.Errorf("HeadObject error: %v", err)
 		}
 
-		var perms []string
-		for _, v := range out.Grants {
-			perms = append(perms, *v.Permission)
+		// The "STANDARD" (which is also the default) storage
+		// class when set would not be included in the results.
+		storageClass := s3.StorageClassStandard
+		if out.StorageClass != nil {
+			storageClass = *out.StorageClass
 		}
-		sort.Strings(perms)
 
-		if !reflect.DeepEqual(perms, expectedPerms) {
-			return fmt.Errorf("Expected ACL permissions to be %v, got %v", expectedPerms, perms)
+		if storageClass != expectedClass {
+			return fmt.Errorf("Expected Storage Class to be %v, got %v",
+				expectedClass, storageClass)
 		}
 
 		return nil
@@ -472,3 +574,17 @@ resource "aws_s3_bucket_object" "object" {
 }
 `, randInt, acl)
 }
+
+func testAccAWSS3BucketObjectConfig_storageClass(randInt int, storage_class string) string {
+	return fmt.Sprintf(`
+resource "aws_s3_bucket" "object_bucket" {
+	bucket = "tf-object-test-bucket-%d"
+}
+resource "aws_s3_bucket_object" "object" {
+	bucket = "${aws_s3_bucket.object_bucket.bucket}"
+	key = "test-key"
+	content = "some_bucket_content"
+	storage_class = "%s"
+}
+`, randInt, storage_class)
+}
@@ -58,6 +58,8 @@ The following arguments are supported:
 * `content_encoding` - (Optional) Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read [w3c content encoding](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11) for further information.
 * `content_language` - (Optional) The language the content is in e.g. en-US or en-GB.
 * `content_type` - (Optional) A standard MIME type describing the format of the object data, e.g. application/octet-stream. All Valid MIME Types are valid for this input.
+* `storage_class` - (Optional) Specifies the desired [Storage Class](http://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
+for the object. Can be either "`STANDARD`", "`REDUCED_REDUNDANCY`", or "`STANDARD_IA`". Defaults to "`STANDARD`".
 * `etag` - (Optional) Used to trigger updates. The only meaningful value is `${md5(file("path/to/file"))}`.
 This attribute is not compatible with `kms_key_id`
 * `kms_key_id` - (Optional) Specifies the AWS KMS Key ID to use for object encryption.
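A docs-style usage sketch for the new argument, reusing the names from the acceptance-test config above; the choice of "STANDARD_IA" here is purely illustrative and not part of the commit:

resource "aws_s3_bucket_object" "object" {
  bucket  = "${aws_s3_bucket.object_bucket.bucket}"
  key     = "test-key"
  content = "some_bucket_content"

  # Infrequent-access storage; validated against the allowed classes.
  storage_class = "STANDARD_IA"
}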