provider/aws: Add support for S3 Object Lifecycle Rules (#6220)

* providers/aws: Add support for S3 Object Lifecycle Rules

* Fix failing vet command

* Fix failing acceptance tests

* Check for nil pointer before dereferencing.

* Move S3 lifecycle rule id validator func to validators.go

* Don't fail when the lifecycle rule GET request returns a 404
KOJIMA Kazunori 2016-04-20 19:16:14 +09:00 committed by Paul Stack
parent 483d081e3e
commit 5e33517394
5 changed files with 731 additions and 0 deletions


@@ -183,6 +183,109 @@ func resourceAwsS3Bucket() *schema.Resource {
},
},
"lifecycle_rule": &schema.Schema{
Type: schema.TypeList,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"id": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
ValidateFunc: validateS3BucketLifecycleRuleId,
},
"prefix": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"enabled": &schema.Schema{
Type: schema.TypeBool,
Required: true,
},
"abort_incomplete_multipart_upload_days": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
},
"expiration": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
Set: expirationHash,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"date": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ValidateFunc: validateS3BucketLifecycleTimestamp,
},
"days": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
},
"expired_object_delete_marker": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
},
},
},
},
"noncurrent_version_expiration": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
Set: expirationHash,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"days": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
},
},
},
},
"transition": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
Set: transitionHash,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"date": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ValidateFunc: validateS3BucketLifecycleTimestamp,
},
"days": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
},
"storage_class": &schema.Schema{
Type: schema.TypeString,
Required: true,
ValidateFunc: validateS3BucketLifecycleStorageClass,
},
},
},
},
"noncurrent_version_transition": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
Set: transitionHash,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"days": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
},
"storage_class": &schema.Schema{
Type: schema.TypeString,
Required: true,
ValidateFunc: validateS3BucketLifecycleStorageClass,
},
},
},
},
},
},
},
"tags": tagsSchema(),
"force_destroy": &schema.Schema{
@@ -286,6 +389,12 @@ func resourceAwsS3BucketUpdate(d *schema.ResourceData, meta interface{}) error {
}
}
if d.HasChange("lifecycle_rule") {
if err := resourceAwsS3BucketLifecycleUpdate(s3conn, d); err != nil {
return err
}
}
return resourceAwsS3BucketRead(d, meta)
}
@@ -434,6 +543,110 @@ func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error {
}
}
// Read the lifecycle configuration
lifecycle, err := s3conn.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{
Bucket: aws.String(d.Id()),
})
if err != nil {
// A 404 simply means no lifecycle configuration is attached; treat any other error as fatal.
if awsError, ok := err.(awserr.RequestFailure); !ok || awsError.StatusCode() != 404 {
return err
}
}
log.Printf("[DEBUG] S3 Bucket: %s, lifecycle: %v", d.Id(), lifecycle)
if len(lifecycle.Rules) > 0 {
rules := make([]map[string]interface{}, 0, len(lifecycle.Rules))
for _, lifecycleRule := range lifecycle.Rules {
rule := make(map[string]interface{})
// ID
if lifecycleRule.ID != nil && *lifecycleRule.ID != "" {
rule["id"] = *lifecycleRule.ID
}
// Prefix
if lifecycleRule.Prefix != nil && *lifecycleRule.Prefix != "" {
rule["prefix"] = *lifecycleRule.Prefix
}
// Enabled
if lifecycleRule.Status != nil {
if *lifecycleRule.Status == s3.ExpirationStatusEnabled {
rule["enabled"] = true
} else {
rule["enabled"] = false
}
}
// AbortIncompleteMultipartUploadDays
if lifecycleRule.AbortIncompleteMultipartUpload != nil {
if lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation != nil {
rule["abort_incomplete_multipart_upload_days"] = int(*lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation)
}
}
// expiration
if lifecycleRule.Expiration != nil {
e := make(map[string]interface{})
if lifecycleRule.Expiration.Date != nil {
e["date"] = (*lifecycleRule.Expiration.Date).Format("2006-01-02")
}
if lifecycleRule.Expiration.Days != nil {
e["days"] = int(*lifecycleRule.Expiration.Days)
}
if lifecycleRule.Expiration.ExpiredObjectDeleteMarker != nil {
e["expired_object_delete_marker"] = *lifecycleRule.Expiration.ExpiredObjectDeleteMarker
}
rule["expiration"] = schema.NewSet(expirationHash, []interface{}{e})
}
// noncurrent_version_expiration
if lifecycleRule.NoncurrentVersionExpiration != nil {
e := make(map[string]interface{})
if lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays != nil {
e["days"] = int(*lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays)
}
rule["noncurrent_version_expiration"] = schema.NewSet(expirationHash, []interface{}{e})
}
// transition
if len(lifecycleRule.Transitions) > 0 {
transitions := make([]interface{}, 0, len(lifecycleRule.Transitions))
for _, v := range lifecycleRule.Transitions {
t := make(map[string]interface{})
if v.Date != nil {
t["date"] = (*v.Date).Format("2006-01-02")
}
if v.Days != nil {
t["days"] = int(*v.Days)
}
if v.StorageClass != nil {
t["storage_class"] = *v.StorageClass
}
transitions = append(transitions, t)
}
rule["transition"] = schema.NewSet(transitionHash, transitions)
}
// noncurrent_version_transition
if len(lifecycleRule.NoncurrentVersionTransitions) > 0 {
transitions := make([]interface{}, 0, len(lifecycleRule.NoncurrentVersionTransitions))
for _, v := range lifecycleRule.NoncurrentVersionTransitions {
t := make(map[string]interface{})
if v.NoncurrentDays != nil {
t["days"] = int(*v.NoncurrentDays)
}
if v.StorageClass != nil {
t["storage_class"] = *v.StorageClass
}
transitions = append(transitions, t)
}
rule["noncurrent_version_transition"] = schema.NewSet(transitionHash, transitions)
}
rules = append(rules, rule)
}
if err := d.Set("lifecycle_rule", rules); err != nil {
return err
}
}
// Add the region as an attribute
location, err := s3conn.GetBucketLocation(
&s3.GetBucketLocationInput{
@@ -863,6 +1076,137 @@ func resourceAwsS3BucketLoggingUpdate(s3conn *s3.S3, d *schema.ResourceData) err
return nil
}
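// resourceAwsS3BucketLifecycleUpdate rebuilds every lifecycle rule from the configuration and
// replaces the bucket's lifecycle configuration with a single PutBucketLifecycleConfiguration call.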
func resourceAwsS3BucketLifecycleUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
bucket := d.Get("bucket").(string)
lifecycleRules := d.Get("lifecycle_rule").([]interface{})
rules := make([]*s3.LifecycleRule, 0, len(lifecycleRules))
for i, lifecycleRule := range lifecycleRules {
r := lifecycleRule.(map[string]interface{})
rule := &s3.LifecycleRule{
Prefix: aws.String(r["prefix"].(string)),
}
// ID
if val, ok := r["id"].(string); ok && val != "" {
rule.ID = aws.String(val)
} else {
rule.ID = aws.String(resource.PrefixedUniqueId("tf-s3-lifecycle-"))
}
// Enabled
if val, ok := r["enabled"].(bool); ok && val {
rule.Status = aws.String(s3.ExpirationStatusEnabled)
} else {
rule.Status = aws.String(s3.ExpirationStatusDisabled)
}
// AbortIncompleteMultipartUpload
if val, ok := r["abort_incomplete_multipart_upload_days"].(int); ok && val > 0 {
rule.AbortIncompleteMultipartUpload = &s3.AbortIncompleteMultipartUpload{
DaysAfterInitiation: aws.Int64(int64(val)),
}
}
// Expiration
expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.expiration", i)).(*schema.Set).List()
if len(expiration) > 0 {
e := expiration[0].(map[string]interface{})
i := &s3.LifecycleExpiration{}
if val, ok := e["date"].(string); ok && val != "" {
t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val))
if err != nil {
return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error())
}
i.Date = aws.Time(t)
} else if val, ok := e["days"].(int); ok && val > 0 {
i.Days = aws.Int64(int64(val))
} else if val, ok := e["expired_object_delete_marker"].(bool); ok {
i.ExpiredObjectDeleteMarker = aws.Bool(val)
}
rule.Expiration = i
}
// NoncurrentVersionExpiration
nc_expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_expiration", i)).(*schema.Set).List()
if len(nc_expiration) > 0 {
e := nc_expiration[0].(map[string]interface{})
if val, ok := e["days"].(int); ok && val > 0 {
rule.NoncurrentVersionExpiration = &s3.NoncurrentVersionExpiration{
NoncurrentDays: aws.Int64(int64(val)),
}
}
}
// Transitions
transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.transition", i)).(*schema.Set).List()
if len(transitions) > 0 {
rule.Transitions = make([]*s3.Transition, 0, len(transitions))
for _, transition := range transitions {
transition := transition.(map[string]interface{})
i := &s3.Transition{}
if val, ok := transition["date"].(string); ok && val != "" {
t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val))
if err != nil {
return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error())
}
i.Date = aws.Time(t)
} else if val, ok := transition["days"].(int); ok && val > 0 {
i.Days = aws.Int64(int64(val))
}
if val, ok := transition["storage_class"].(string); ok && val != "" {
i.StorageClass = aws.String(val)
}
rule.Transitions = append(rule.Transitions, i)
}
}
// NoncurrentVersionTransitions
nc_transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_transition", i)).(*schema.Set).List()
if len(nc_transitions) > 0 {
rule.NoncurrentVersionTransitions = make([]*s3.NoncurrentVersionTransition, 0, len(nc_transitions))
for _, transition := range nc_transitions {
transition := transition.(map[string]interface{})
i := &s3.NoncurrentVersionTransition{}
if val, ok := transition["days"].(int); ok && val > 0 {
i.NoncurrentDays = aws.Int64(int64(val))
}
if val, ok := transition["storage_class"].(string); ok && val != "" {
i.StorageClass = aws.String(val)
}
rule.NoncurrentVersionTransitions = append(rule.NoncurrentVersionTransitions, i)
}
}
rules = append(rules, rule)
}
i := &s3.PutBucketLifecycleConfigurationInput{
Bucket: aws.String(bucket),
LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
Rules: rules,
},
}
err := resource.Retry(1*time.Minute, func() *resource.RetryError {
if _, err := s3conn.PutBucketLifecycleConfiguration(i); err != nil {
return resource.NonRetryableError(err)
}
return nil
})
if err != nil {
return fmt.Errorf("Error putting S3 lifecycle: %s", err)
}
return nil
}
func normalizeRoutingRules(w []*s3.RoutingRule) (string, error) {
withNulls, err := json.Marshal(w)
if err != nil {
@@ -927,6 +1271,36 @@ func normalizeRegion(region string) string {
return region
}
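// expirationHash and transitionHash derive stable hashes from the sub-block attributes so the
// schema.TypeSet fields above can match configured blocks against the rules read back from S3.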
func expirationHash(v interface{}) int {
var buf bytes.Buffer
m := v.(map[string]interface{})
if v, ok := m["date"]; ok {
buf.WriteString(fmt.Sprintf("%s-", v.(string)))
}
if v, ok := m["days"]; ok {
buf.WriteString(fmt.Sprintf("%d-", v.(int)))
}
if v, ok := m["expired_object_delete_marker"]; ok {
buf.WriteString(fmt.Sprintf("%t-", v.(bool)))
}
return hashcode.String(buf.String())
}
func transitionHash(v interface{}) int {
var buf bytes.Buffer
m := v.(map[string]interface{})
if v, ok := m["date"]; ok {
buf.WriteString(fmt.Sprintf("%s-", v.(string)))
}
if v, ok := m["days"]; ok {
buf.WriteString(fmt.Sprintf("%d-", v.(int)))
}
if v, ok := m["storage_class"]; ok {
buf.WriteString(fmt.Sprintf("%s-", v.(string)))
}
return hashcode.String(buf.String())
}
type S3Website struct {
Endpoint, Domain string
}


@@ -343,6 +343,85 @@ func TestAccAWSS3Bucket_Logging(t *testing.T) {
})
}
func TestAccAWSS3Bucket_Lifecycle(t *testing.T) {
rInt := acctest.RandInt()
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSS3BucketDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSS3BucketConfigWithLifecycle(rInt),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSS3BucketExists("aws_s3_bucket.bucket"),
resource.TestCheckResourceAttr(
"aws_s3_bucket.bucket", "lifecycle_rule.0.id", "id1"),
resource.TestCheckResourceAttr(
"aws_s3_bucket.bucket", "lifecycle_rule.0.prefix", "path1/"),
resource.TestCheckResourceAttr(
"aws_s3_bucket.bucket", "lifecycle_rule.0.expiration.2613713285.days", "365"),
resource.TestCheckResourceAttr(
"aws_s3_bucket.bucket", "lifecycle_rule.0.expiration.2613713285.date", ""),
resource.TestCheckResourceAttr(
"aws_s3_bucket.bucket", "lifecycle_rule.0.expiration.2613713285.expired_object_delete_marker", "false"),
resource.TestCheckResourceAttr(
"aws_s3_bucket.bucket", "lifecycle_rule.0.transition.2000431762.date", ""),
resource.TestCheckResourceAttr(
"aws_s3_bucket.bucket", "lifecycle_rule.0.transition.2000431762.days", "30"),
resource.TestCheckResourceAttr(
"aws_s3_bucket.bucket", "lifecycle_rule.0.transition.2000431762.storage_class", "STANDARD_IA"),
resource.TestCheckResourceAttr(
"aws_s3_bucket.bucket", "lifecycle_rule.0.transition.6450812.date", ""),
resource.TestCheckResourceAttr(
"aws_s3_bucket.bucket", "lifecycle_rule.0.transition.6450812.days", "60"),
resource.TestCheckResourceAttr(
"aws_s3_bucket.bucket", "lifecycle_rule.0.transition.6450812.storage_class", "GLACIER"),
resource.TestCheckResourceAttr(
"aws_s3_bucket.bucket", "lifecycle_rule.1.id", "id2"),
resource.TestCheckResourceAttr(
"aws_s3_bucket.bucket", "lifecycle_rule.1.prefix", "path2/"),
resource.TestCheckResourceAttr(
"aws_s3_bucket.bucket", "lifecycle_rule.1.expiration.2855832418.date", "2016-01-12"),
resource.TestCheckResourceAttr(
"aws_s3_bucket.bucket", "lifecycle_rule.1.expiration.2855832418.days", "0"),
resource.TestCheckResourceAttr(
"aws_s3_bucket.bucket", "lifecycle_rule.1.expiration.2855832418.expired_object_delete_marker", "false"),
),
},
resource.TestStep{
Config: testAccAWSS3BucketConfigWithVersioningLifecycle(rInt),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSS3BucketExists("aws_s3_bucket.bucket"),
resource.TestCheckResourceAttr(
"aws_s3_bucket.bucket", "lifecycle_rule.0.id", "id1"),
resource.TestCheckResourceAttr(
"aws_s3_bucket.bucket", "lifecycle_rule.0.prefix", "path1/"),
resource.TestCheckResourceAttr(
"aws_s3_bucket.bucket", "lifecycle_rule.0.enabled", "true"),
resource.TestCheckResourceAttr(
"aws_s3_bucket.bucket", "lifecycle_rule.0.noncurrent_version_expiration.80908210.days", "365"),
resource.TestCheckResourceAttr(
"aws_s3_bucket.bucket", "lifecycle_rule.0.noncurrent_version_transition.1377917700.days", "30"),
resource.TestCheckResourceAttr(
"aws_s3_bucket.bucket", "lifecycle_rule.0.noncurrent_version_transition.1377917700.storage_class", "STANDARD_IA"),
resource.TestCheckResourceAttr(
"aws_s3_bucket.bucket", "lifecycle_rule.0.noncurrent_version_transition.2528035817.days", "60"),
resource.TestCheckResourceAttr(
"aws_s3_bucket.bucket", "lifecycle_rule.0.noncurrent_version_transition.2528035817.storage_class", "GLACIER"),
resource.TestCheckResourceAttr(
"aws_s3_bucket.bucket", "lifecycle_rule.1.id", "id2"),
resource.TestCheckResourceAttr(
"aws_s3_bucket.bucket", "lifecycle_rule.1.prefix", "path2/"),
resource.TestCheckResourceAttr(
"aws_s3_bucket.bucket", "lifecycle_rule.1.enabled", "false"),
resource.TestCheckResourceAttr(
"aws_s3_bucket.bucket", "lifecycle_rule.1.noncurrent_version_expiration.80908210.days", "365"),
),
},
},
})
}
func testAccCheckAWSS3BucketDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*AWSClient).s3conn
@@ -812,3 +891,77 @@ resource "aws_s3_bucket" "bucket" {
}
`, randInt, randInt)
}
func testAccAWSS3BucketConfigWithLifecycle(randInt int) string {
return fmt.Sprintf(`
resource "aws_s3_bucket" "bucket" {
bucket = "tf-test-bucket-%d"
acl = "private"
lifecycle_rule {
id = "id1"
prefix = "path1/"
enabled = true
expiration {
days = 365
}
transition {
days = 30
storage_class = "STANDARD_IA"
}
transition {
days = 60
storage_class = "GLACIER"
}
}
lifecycle_rule {
id = "id2"
prefix = "path2/"
enabled = true
expiration {
date = "2016-01-12"
}
}
}
`, randInt)
}
func testAccAWSS3BucketConfigWithVersioningLifecycle(randInt int) string {
return fmt.Sprintf(`
resource "aws_s3_bucket" "bucket" {
bucket = "tf-test-bucket-%d"
acl = "private"
versioning {
enabled = false
}
lifecycle_rule {
id = "id1"
prefix = "path1/"
enabled = true
noncurrent_version_expiration {
days = 365
}
noncurrent_version_transition {
days = 30
storage_class = "STANDARD_IA"
}
noncurrent_version_transition {
days = 60
storage_class = "GLACIER"
}
}
lifecycle_rule {
id = "id2"
prefix = "path2/"
enabled = false
noncurrent_version_expiration {
days = 365
}
}
}
`, randInt)
}


@@ -6,6 +6,7 @@ import (
"regexp"
"time"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/hashicorp/terraform/helper/schema"
)
@@ -367,3 +368,33 @@ func validateLogGroupName(v interface{}, k string) (ws []string, errors []error)
return
}
func validateS3BucketLifecycleTimestamp(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
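// Accept a bare date such as "2016-01-02" by appending midnight UTC before RFC3339 parsing.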
_, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", value))
if err != nil {
errors = append(errors, fmt.Errorf(
"%q cannot be parsed as an RFC 3339 timestamp", value))
}
return
}
func validateS3BucketLifecycleStorageClass(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if value != s3.TransitionStorageClassStandardIa && value != s3.TransitionStorageClassGlacier {
errors = append(errors, fmt.Errorf(
"%q must be one of %q, %q", k, s3.TransitionStorageClassStandardIa, s3.TransitionStorageClassGlacier))
}
return
}
func validateS3BucketLifecycleRuleId(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if len(value) > 255 {
errors = append(errors, fmt.Errorf(
"%q cannot exceed 255 characters", k))
}
return
}


@@ -382,3 +382,81 @@ func TestValidateLogGroupName(t *testing.T) {
}
}
}
func TestValidateS3BucketLifecycleTimestamp(t *testing.T) {
validDates := []string{
"2016-01-01",
"2006-01-02",
}
for _, v := range validDates {
_, errors := validateS3BucketLifecycleTimestamp(v, "date")
if len(errors) != 0 {
t.Fatalf("%q should be valid date: %q", v, errors)
}
}
invalidDates := []string{
"Jan 01 2016",
"20160101",
}
for _, v := range invalidDates {
_, errors := validateS3BucketLifecycleTimestamp(v, "date")
if len(errors) == 0 {
t.Fatalf("%q should be invalid date", v)
}
}
}
func TestValidateS3BucketLifecycleStorageClass(t *testing.T) {
validStorageClass := []string{
"STANDARD_IA",
"GLACIER",
}
for _, v := range validStorageClass {
_, errors := validateS3BucketLifecycleStorageClass(v, "storage_class")
if len(errors) != 0 {
t.Fatalf("%q should be valid storage class: %q", v, errors)
}
}
invalidStorageClass := []string{
"STANDARD",
"1234",
}
for _, v := range invalidStorageClass {
_, errors := validateS3BucketLifecycleStorageClass(v, "storage_class")
if len(errors) == 0 {
t.Fatalf("%q should be invalid storage class", v)
}
}
}
func TestValidateS3BucketLifecycleRuleId(t *testing.T) {
validId := []string{
"YadaHereAndThere",
"Valid-5Rule_ID",
"This . is also %% valid@!)+*(:ID",
"1234",
strings.Repeat("W", 255),
}
for _, v := range validId {
_, errors := validateS3BucketLifecycleRuleId(v, "id")
if len(errors) != 0 {
t.Fatalf("%q should be a valid lifecycle rule id: %q", v, errors)
}
}
invalidId := []string{
// length > 255
strings.Repeat("W", 256),
}
for _, v := range invalidId {
_, errors := validateS3BucketLifecycleRuleId(v, "id")
if len(errors) == 0 {
t.Fatalf("%q should be an invalid lifecycle rule id", v)
}
}
}


@@ -97,6 +97,66 @@ resource "aws_s3_bucket" "b" {
}
```
### Using object lifecycle rules
```
resource "aws_s3_bucket" "bucket" {
bucket = "my-bucket"
acl = "private"
lifecycle_rule {
id = "log"
prefix = "log/"
enabled = true
transition {
days = 30
storage_class = "STANDARD_IA"
}
transition {
days = 60
storage_class = "GLACIER"
}
expiration {
days = 90
}
}
lifecycle_rule {
id = "log"
prefix = "tmp/"
enabled = true
expiration {
date = "2016-01-12"
}
}
}
resource "aws_s3_bucket" "versioning_bucket" {
bucket = "my-versioning-bucket"
acl = "private"
versioning {
enabled = false
}
lifecycle_rule {
prefix = "config/"
enabled = true
noncurrent_version_transition {
days = 30
storage_class = "STANDARD_IA"
}
noncurrent_version_transition {
days = 60
storage_class = "GLACIER"
}
noncurrent_version_expiration {
days = 90
}
}
}
```
## Argument Reference
The following arguments are supported:
@@ -111,6 +171,7 @@ The following arguments are supported:
* `cors_rule` - (Optional) A rule of [Cross-Origin Resource Sharing](https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) (documented below).
* `versioning` - (Optional) A state of [versioning](https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html) (documented below).
* `logging` - (Optional) The settings of [bucket logging](https://docs.aws.amazon.com/AmazonS3/latest/UG/ManagingBucketLogging.html) (documented below).
* `lifecycle_rule` - (Optional) A configuration of [object lifecycle management](http://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) (documented below).
The `website` object supports the following:
@@ -137,6 +198,40 @@ The `logging` object supports the following:
* `target_bucket` - (Required) The name of the bucket that will receive the log objects.
* `target_prefix` - (Optional) To specify a key prefix for log objects.
The `lifecycle_rule` object supports the following:
* `id` - (Optional) Unique identifier for the rule.
* `prefix` - (Required) Object key prefix identifying one or more objects to which the rule applies.
* `enabled` - (Required) Specifies lifecycle rule status.
* `abort_incomplete_multipart_upload_days` - (Optional) Specifies the number of days after initiating a multipart upload by which the upload must be completed (see the example below).
* `expiration` - (Optional) Specifies when objects expire (documented below).
* `transition` - (Optional) Specifies when objects transition to another storage class (documented below).
* `noncurrent_version_expiration` - (Optional) Specifies when noncurrent object versions expire (documented below).
* `noncurrent_version_transition` - (Optional) Specifies when noncurrent object versions transition (documented below).
At least one of `expiration`, `transition`, `noncurrent_version_expiration`, `noncurrent_version_transition` must be specified.
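For example, a rule that aborts stalled multipart uploads after a week alongside a plain expiration might look like the following sketch (the bucket name and prefix are illustrative):
```
resource "aws_s3_bucket" "uploads" {
  bucket = "my-upload-bucket"
  acl    = "private"
  lifecycle_rule {
    id      = "uploads"
    prefix  = "uploads/"
    enabled = true
    abort_incomplete_multipart_upload_days = 7
    expiration {
      days = 90
    }
  }
}
```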
The `expiration` object supports the following:
* `date` - (Optional) Specifies the date after which you want the corresponding action to take effect.
* `days` - (Optional) Specifies the number of days after object creation when the specific rule action takes effect.
* `expired_object_delete_marker` - (Optional) On a versioned bucket (versioning-enabled or versioning-suspended), adding this element directs Amazon S3 to delete expired object delete markers.
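On a versioning-enabled bucket, for instance, a rule that only cleans up expired object delete markers could be sketched as follows (resource names are illustrative; note the update logic reads `date`, then `days`, then `expired_object_delete_marker`, so the flag only takes effect when the other two are unset):
```
resource "aws_s3_bucket" "versioned" {
  bucket = "my-versioned-bucket"
  acl    = "private"
  versioning {
    enabled = true
  }
  lifecycle_rule {
    id      = "cleanup-markers"
    prefix  = "docs/"
    enabled = true
    expiration {
      expired_object_delete_marker = true
    }
  }
}
```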
The `transition` object supports the following:
* `date` - (Optional) Specifies the date after which you want the corresponding action to take effect.
* `days` - (Optional) Specifies the number of days after object creation when the specific rule action takes effect.
* `storage_class` - (Required) Specifies the Amazon S3 storage class to which you want the object to transition. Can be `STANDARD_IA` or `GLACIER`.
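When both `date` and `days` are set, the provider uses `date` (see `resourceAwsS3BucketLifecycleUpdate` above). A date-based transition might look like this sketch (date and names illustrative):
```
resource "aws_s3_bucket" "archive" {
  bucket = "my-archive-bucket"
  acl    = "private"
  lifecycle_rule {
    id      = "archive"
    prefix  = "archive/"
    enabled = true
    transition {
      date          = "2016-06-01"
      storage_class = "GLACIER"
    }
  }
}
```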
The `noncurrent_version_expiration` object supports the following:
* `days` - (Required) Specifies the number of days noncurrent object versions persist before they expire.
The `noncurrent_version_transition` object supports the following:
* `days` - (Required) Specifies the number of days noncurrent object versions persist before they transition.
* `storage_class` - (Required) Specifies the Amazon S3 storage class to which you want noncurrent object versions to transition. Can be `STANDARD_IA` or `GLACIER`.
## Attributes Reference
The following attributes are exported: