From a4ed36352bf80738fafa6fcb474feca091ba19d6 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Fri, 14 Jan 2022 16:16:53 -0500 Subject: [PATCH] r/s3_bucket: deprecate 'lifecycle_rule' --- .changelog/22581.txt | 3 + internal/service/s3/bucket.go | 497 ++++++++++--------------- internal/service/s3/bucket_test.go | 413 -------------------- internal/service/s3/validate.go | 17 - internal/service/s3/validate_test.go | 31 -- internal/tags/tags.go | 9 + website/docs/r/s3_bucket.html.markdown | 128 +------ 7 files changed, 220 insertions(+), 878 deletions(-) create mode 100644 .changelog/22581.txt delete mode 100644 internal/service/s3/validate.go delete mode 100644 internal/service/s3/validate_test.go diff --git a/.changelog/22581.txt b/.changelog/22581.txt new file mode 100644 index 00000000000..caf15078b7a --- /dev/null +++ b/.changelog/22581.txt @@ -0,0 +1,3 @@ +```release-note:note +resource/aws_s3_bucket: The `lifecycle_rule` argument has been deprecated and is now read-only. Use the `aws_s3_bucket_lifecycle_configuration` resource instead. 
+``` \ No newline at end of file diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 805db7f5a48..01a17f70e33 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -267,111 +267,114 @@ func ResourceBucket() *schema.Resource { }, "lifecycle_rule": { - Type: schema.TypeList, - Optional: true, + Type: schema.TypeSet, + Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringLenBetween(0, 255), + Type: schema.TypeString, + Computed: true, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", }, "prefix": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Computed: true, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", }, - "tags": tftags.TagsSchema(), + "tags": tftags.TagsSchemaComputedDeprecated("Use the aws_s3_bucket_lifecycle_configuration resource instead"), "enabled": { - Type: schema.TypeBool, - Required: true, + Type: schema.TypeBool, + Computed: true, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", }, "abort_incomplete_multipart_upload_days": { - Type: schema.TypeInt, - Optional: true, + Type: schema.TypeInt, + Computed: true, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", }, "expiration": { Type: schema.TypeList, - Optional: true, - MaxItems: 1, + Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "date": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validBucketLifecycleTimestamp, + Type: schema.TypeString, + Computed: true, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", }, "days": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntAtLeast(0), + Type: schema.TypeInt, + Computed: true, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration 
resource instead", }, "expired_object_delete_marker": { - Type: schema.TypeBool, - Optional: true, + Type: schema.TypeBool, + Computed: true, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", }, }, }, }, "noncurrent_version_expiration": { Type: schema.TypeList, - MaxItems: 1, - Optional: true, + Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "days": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntAtLeast(1), + Type: schema.TypeInt, + Computed: true, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", }, }, }, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", }, "transition": { Type: schema.TypeSet, - Optional: true, - Set: transitionHash, + Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "date": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validBucketLifecycleTimestamp, + Type: schema.TypeString, + Computed: true, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", }, "days": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntAtLeast(0), + Type: schema.TypeInt, + Computed: true, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", }, "storage_class": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.TransitionStorageClass_Values(), false), + Type: schema.TypeString, + Computed: true, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", }, }, }, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", }, "noncurrent_version_transition": { Type: schema.TypeSet, - Optional: true, - Set: transitionHash, + Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "days": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntAtLeast(0), + Type: schema.TypeInt, + Computed: true, + 
Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", }, "storage_class": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.TransitionStorageClass_Values(), false), + Type: schema.TypeString, + Computed: true, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", }, }, }, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", }, }, }, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", }, "force_destroy": { @@ -811,12 +814,6 @@ func resourceBucketUpdate(d *schema.ResourceData, meta interface{}) error { } } - if d.HasChange("lifecycle_rule") { - if err := resourceBucketLifecycleUpdate(conn, d); err != nil { - return err - } - } - if d.HasChange("acceleration_status") { if err := resourceBucketAccelerationUpdate(conn, d); err != nil { return err @@ -1143,127 +1140,15 @@ func resourceBucketRead(d *schema.ResourceData, meta interface{}) error { Bucket: aws.String(d.Id()), }) }) - if err != nil && !tfawserr.ErrMessageContains(err, "NoSuchLifecycleConfiguration", "") { - return err + if err != nil && !tfawserr.ErrCodeEquals(err, "NoSuchLifecycleConfiguration") { + return fmt.Errorf("error getting S3 Bucket (%s) Lifecycle Configuration: %w", d.Id(), err) } - lifecycleRules := make([]map[string]interface{}, 0) - if lifecycle, ok := lifecycleResponse.(*s3.GetBucketLifecycleConfigurationOutput); ok && len(lifecycle.Rules) > 0 { - lifecycleRules = make([]map[string]interface{}, 0, len(lifecycle.Rules)) - - for _, lifecycleRule := range lifecycle.Rules { - log.Printf("[DEBUG] S3 bucket: %s, read lifecycle rule: %v", d.Id(), lifecycleRule) - rule := make(map[string]interface{}) - - // ID - if lifecycleRule.ID != nil && aws.StringValue(lifecycleRule.ID) != "" { - rule["id"] = aws.StringValue(lifecycleRule.ID) - } - filter := lifecycleRule.Filter - if filter != nil { - if filter.And != nil { - // Prefix - if filter.And.Prefix != nil && 
aws.StringValue(filter.And.Prefix) != "" { - rule["prefix"] = aws.StringValue(filter.And.Prefix) - } - // Tag - if len(filter.And.Tags) > 0 { - rule["tags"] = KeyValueTags(filter.And.Tags).IgnoreAWS().Map() - } - } else { - // Prefix - if filter.Prefix != nil && aws.StringValue(filter.Prefix) != "" { - rule["prefix"] = aws.StringValue(filter.Prefix) - } - // Tag - if filter.Tag != nil { - rule["tags"] = KeyValueTags([]*s3.Tag{filter.Tag}).IgnoreAWS().Map() - } - } - } else { - if lifecycleRule.Prefix != nil { - rule["prefix"] = aws.StringValue(lifecycleRule.Prefix) - } - } - - // Enabled - if lifecycleRule.Status != nil { - if aws.StringValue(lifecycleRule.Status) == s3.ExpirationStatusEnabled { - rule["enabled"] = true - } else { - rule["enabled"] = false - } - } - - // AbortIncompleteMultipartUploadDays - if lifecycleRule.AbortIncompleteMultipartUpload != nil { - if lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation != nil { - rule["abort_incomplete_multipart_upload_days"] = int(aws.Int64Value(lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation)) - } - } - - // expiration - if lifecycleRule.Expiration != nil { - e := make(map[string]interface{}) - if lifecycleRule.Expiration.Date != nil { - e["date"] = (aws.TimeValue(lifecycleRule.Expiration.Date)).Format("2006-01-02") - } - if lifecycleRule.Expiration.Days != nil { - e["days"] = int(aws.Int64Value(lifecycleRule.Expiration.Days)) - } - if lifecycleRule.Expiration.ExpiredObjectDeleteMarker != nil { - e["expired_object_delete_marker"] = aws.BoolValue(lifecycleRule.Expiration.ExpiredObjectDeleteMarker) - } - rule["expiration"] = []interface{}{e} - } - // noncurrent_version_expiration - if lifecycleRule.NoncurrentVersionExpiration != nil { - e := make(map[string]interface{}) - if lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays != nil { - e["days"] = int(aws.Int64Value(lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays)) - } - rule["noncurrent_version_expiration"] = 
[]interface{}{e} - } - //// transition - if len(lifecycleRule.Transitions) > 0 { - transitions := make([]interface{}, 0, len(lifecycleRule.Transitions)) - for _, v := range lifecycleRule.Transitions { - t := make(map[string]interface{}) - if v.Date != nil { - t["date"] = (aws.TimeValue(v.Date)).Format("2006-01-02") - } - if v.Days != nil { - t["days"] = int(aws.Int64Value(v.Days)) - } - if v.StorageClass != nil { - t["storage_class"] = aws.StringValue(v.StorageClass) - } - transitions = append(transitions, t) - } - rule["transition"] = schema.NewSet(transitionHash, transitions) - } - // noncurrent_version_transition - if len(lifecycleRule.NoncurrentVersionTransitions) > 0 { - transitions := make([]interface{}, 0, len(lifecycleRule.NoncurrentVersionTransitions)) - for _, v := range lifecycleRule.NoncurrentVersionTransitions { - t := make(map[string]interface{}) - if v.NoncurrentDays != nil { - t["days"] = int(aws.Int64Value(v.NoncurrentDays)) - } - if v.StorageClass != nil { - t["storage_class"] = aws.StringValue(v.StorageClass) - } - transitions = append(transitions, t) - } - rule["noncurrent_version_transition"] = schema.NewSet(transitionHash, transitions) - } - - lifecycleRules = append(lifecycleRules, rule) + if lifecycle, ok := lifecycleResponse.(*s3.GetBucketLifecycleConfigurationOutput); ok { + if err := d.Set("lifecycle_rule", flattenBucketLifecycleRules(lifecycle.Rules)); err != nil { + return fmt.Errorf("error setting lifecycle_rule: %s", err) } } - if err := d.Set("lifecycle_rule", lifecycleRules); err != nil { - return fmt.Errorf("error setting lifecycle_rule: %s", err) - } // Read the bucket replication configuration @@ -2203,163 +2088,179 @@ func resourceBucketInternalReplicationConfigurationUpdate(conn *s3.S3, d *schema return nil } -func resourceBucketLifecycleUpdate(conn *s3.S3, d *schema.ResourceData) error { - bucket := d.Get("bucket").(string) +func flattenBucketLifecycleRuleExpiration(expiration *s3.LifecycleExpiration) []interface{} { + if 
expiration == nil { + return []interface{}{} + } - lifecycleRules := d.Get("lifecycle_rule").([]interface{}) + m := make(map[string]interface{}) - if len(lifecycleRules) == 0 || lifecycleRules[0] == nil { - i := &s3.DeleteBucketLifecycleInput{ - Bucket: aws.String(bucket), - } + if expiration.Date != nil { + m["date"] = (aws.TimeValue(expiration.Date)).Format("2006-01-02") + } + if expiration.Days != nil { + m["days"] = int(aws.Int64Value(expiration.Days)) + } + if expiration.ExpiredObjectDeleteMarker != nil { + m["expired_object_delete_marker"] = aws.BoolValue(expiration.ExpiredObjectDeleteMarker) + } - _, err := conn.DeleteBucketLifecycle(i) - if err != nil { - return fmt.Errorf("Error removing S3 lifecycle: %s", err) + return []interface{}{m} +} + +func flattenBucketLifecycleRuleFilter(filter *s3.LifecycleRuleFilter) []interface{} { + if filter == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if filter.And != nil { + // Prefix + if filter.And.Prefix != nil { + m["prefix"] = aws.StringValue(filter.And.Prefix) + } + // Tag + if len(filter.And.Tags) > 0 { + m["tags"] = KeyValueTags(filter.And.Tags).IgnoreAWS().Map() + } + } else { + // Prefix + if filter.Prefix != nil { + m["prefix"] = aws.StringValue(filter.Prefix) + } + // Tag + if filter.Tag != nil { + m["tags"] = KeyValueTags([]*s3.Tag{filter.Tag}).IgnoreAWS().Map() } - return nil } - rules := make([]*s3.LifecycleRule, 0, len(lifecycleRules)) + return []interface{}{m} +} - for i, lifecycleRule := range lifecycleRules { - r := lifecycleRule.(map[string]interface{}) +func flattenBucketLifecycleRules(lifecycleRules []*s3.LifecycleRule) []interface{} { + if len(lifecycleRules) == 0 { + return []interface{}{} + } - rule := &s3.LifecycleRule{} + var results []interface{} - // Filter - tags := Tags(tftags.New(r["tags"]).IgnoreAWS()) - filter := &s3.LifecycleRuleFilter{} - if len(tags) > 0 { - lifecycleRuleAndOp := &s3.LifecycleRuleAndOperator{} - 
lifecycleRuleAndOp.SetPrefix(r["prefix"].(string)) - lifecycleRuleAndOp.SetTags(tags) - filter.SetAnd(lifecycleRuleAndOp) - } else { - filter.SetPrefix(r["prefix"].(string)) + for _, lifecycleRule := range lifecycleRules { + if lifecycleRule == nil { + continue + } + + rule := make(map[string]interface{}) + + // AbortIncompleteMultipartUploadDays + if lifecycleRule.AbortIncompleteMultipartUpload != nil { + if lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation != nil { + rule["abort_incomplete_multipart_upload_days"] = int(aws.Int64Value(lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation)) + } } - rule.SetFilter(filter) // ID - if val, ok := r["id"].(string); ok && val != "" { - rule.ID = aws.String(val) - } else { - rule.ID = aws.String(resource.PrefixedUniqueId("tf-s3-lifecycle-")) + if lifecycleRule.ID != nil { + rule["id"] = aws.StringValue(lifecycleRule.ID) } - // Enabled - if val, ok := r["enabled"].(bool); ok && val { - rule.Status = aws.String(s3.ExpirationStatusEnabled) - } else { - rule.Status = aws.String(s3.ExpirationStatusDisabled) + // Filter + if lifecycleRule.Filter != nil { + rule["filter"] = flattenBucketLifecycleRuleFilter(lifecycleRule.Filter) } - // AbortIncompleteMultipartUpload - if val, ok := r["abort_incomplete_multipart_upload_days"].(int); ok && val > 0 { - rule.AbortIncompleteMultipartUpload = &s3.AbortIncompleteMultipartUpload{ - DaysAfterInitiation: aws.Int64(int64(val)), + // Prefix + if lifecycleRule.Prefix != nil { + rule["prefix"] = aws.StringValue(lifecycleRule.Prefix) + } + + // Enabled + if lifecycleRule.Status != nil { + if aws.StringValue(lifecycleRule.Status) == s3.ExpirationStatusEnabled { + rule["enabled"] = true + } else { + rule["enabled"] = false } } // Expiration - expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.expiration", i)).([]interface{}) - if len(expiration) > 0 && expiration[0] != nil { - e := expiration[0].(map[string]interface{}) - i := &s3.LifecycleExpiration{} - if val, ok := 
e["date"].(string); ok && val != "" { - t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val)) - if err != nil { - return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error()) - } - i.Date = aws.Time(t) - } else if val, ok := e["days"].(int); ok && val > 0 { - i.Days = aws.Int64(int64(val)) - } else if val, ok := e["expired_object_delete_marker"].(bool); ok { - i.ExpiredObjectDeleteMarker = aws.Bool(val) - } - rule.Expiration = i + if lifecycleRule.Expiration != nil { + rule["expiration"] = flattenBucketLifecycleRuleExpiration(lifecycleRule.Expiration) } // NoncurrentVersionExpiration - nc_expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_expiration", i)).([]interface{}) - if len(nc_expiration) > 0 && nc_expiration[0] != nil { - e := nc_expiration[0].(map[string]interface{}) - - if val, ok := e["days"].(int); ok && val > 0 { - rule.NoncurrentVersionExpiration = &s3.NoncurrentVersionExpiration{ - NoncurrentDays: aws.Int64(int64(val)), - } + if lifecycleRule.NoncurrentVersionExpiration != nil { + e := make(map[string]interface{}) + if lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays != nil { + e["days"] = int(aws.Int64Value(lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays)) } + rule["noncurrent_version_expiration"] = []interface{}{e} } - // Transitions - transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.transition", i)).(*schema.Set).List() - if len(transitions) > 0 { - rule.Transitions = make([]*s3.Transition, 0, len(transitions)) - for _, transition := range transitions { - transition := transition.(map[string]interface{}) - i := &s3.Transition{} - if val, ok := transition["date"].(string); ok && val != "" { - t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val)) - if err != nil { - return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error()) - } - i.Date = aws.Time(t) - } else if val, ok := transition["days"].(int); ok && val >= 0 { - 
i.Days = aws.Int64(int64(val)) - } - if val, ok := transition["storage_class"].(string); ok && val != "" { - i.StorageClass = aws.String(val) - } + // NoncurrentVersionTransition + if len(lifecycleRule.NoncurrentVersionTransitions) > 0 { + rule["noncurrent_version_transition"] = flattenBucketLifecycleRuleNoncurrentVersionTransitions(lifecycleRule.NoncurrentVersionTransitions) + } - rule.Transitions = append(rule.Transitions, i) - } + // Transition + if len(lifecycleRule.Transitions) > 0 { + rule["transition"] = flattenBucketLifecycleRuleTransitions(lifecycleRule.Transitions) } - // NoncurrentVersionTransitions - nc_transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_transition", i)).(*schema.Set).List() - if len(nc_transitions) > 0 { - rule.NoncurrentVersionTransitions = make([]*s3.NoncurrentVersionTransition, 0, len(nc_transitions)) - for _, transition := range nc_transitions { - transition := transition.(map[string]interface{}) - i := &s3.NoncurrentVersionTransition{} - if val, ok := transition["days"].(int); ok && val >= 0 { - i.NoncurrentDays = aws.Int64(int64(val)) - } - if val, ok := transition["storage_class"].(string); ok && val != "" { - i.StorageClass = aws.String(val) - } - rule.NoncurrentVersionTransitions = append(rule.NoncurrentVersionTransitions, i) - } + results = append(results, rule) + } + + return results +} + +func flattenBucketLifecycleRuleNoncurrentVersionTransitions(transitions []*s3.NoncurrentVersionTransition) []interface{} { + if len(transitions) == 0 { + return []interface{}{} + } + + var results []interface{} + + for _, t := range transitions { + m := make(map[string]interface{}) + + if t.NoncurrentDays != nil { + m["days"] = int(aws.Int64Value(t.NoncurrentDays)) } - // As a lifecycle rule requires 1 or more transition/expiration actions, - // we explicitly pass a default ExpiredObjectDeleteMarker value to be able to create - // the rule while keeping the policy unaffected if the conditions are not met. 
- if rule.Expiration == nil && rule.NoncurrentVersionExpiration == nil && - rule.Transitions == nil && rule.NoncurrentVersionTransitions == nil && - rule.AbortIncompleteMultipartUpload == nil { - rule.Expiration = &s3.LifecycleExpiration{ExpiredObjectDeleteMarker: aws.Bool(false)} + if t.StorageClass != nil { + m["storage_class"] = aws.StringValue(t.StorageClass) } - rules = append(rules, rule) + results = append(results, m) } - i := &s3.PutBucketLifecycleConfigurationInput{ - Bucket: aws.String(bucket), - LifecycleConfiguration: &s3.BucketLifecycleConfiguration{ - Rules: rules, - }, + return results +} + +func flattenBucketLifecycleRuleTransitions(transitions []*s3.Transition) []interface{} { + if len(transitions) == 0 { + return []interface{}{} } - _, err := verify.RetryOnAWSCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { - return conn.PutBucketLifecycleConfiguration(i) - }) - if err != nil { - return fmt.Errorf("Error putting S3 lifecycle: %s", err) + var results []interface{} + + for _, t := range transitions { + m := make(map[string]interface{}) + + if t.Date != nil { + m["date"] = (aws.TimeValue(t.Date)).Format("2006-01-02") + } + if t.Days != nil { + m["days"] = int(aws.Int64Value(t.Days)) + } + if t.StorageClass != nil { + m["storage_class"] = aws.StringValue(t.StorageClass) + } + + results = append(results, m) } - return nil + return results } func flattenServerSideEncryptionConfiguration(c *s3.ServerSideEncryptionConfiguration) []map[string]interface{} { @@ -2604,26 +2505,6 @@ func grantHash(v interface{}) int { return create.StringHashcode(buf.String()) } -func transitionHash(v interface{}) int { - var buf bytes.Buffer - m, ok := v.(map[string]interface{}) - - if !ok { - return 0 - } - - if v, ok := m["date"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - if v, ok := m["days"]; ok { - buf.WriteString(fmt.Sprintf("%d-", v.(int))) - } - if v, ok := m["storage_class"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - 
return create.StringHashcode(buf.String()) -} - func rulesHash(v interface{}) int { var buf bytes.Buffer m, ok := v.(map[string]interface{}) diff --git a/internal/service/s3/bucket_test.go b/internal/service/s3/bucket_test.go index 227c16afbd3..ea4dbb367cc 100644 --- a/internal/service/s3/bucket_test.go +++ b/internal/service/s3/bucket_test.go @@ -1252,208 +1252,6 @@ func TestAccS3Bucket_Security_logging(t *testing.T) { }) } -func TestAccS3Bucket_Manage_lifecycleBasic(t *testing.T) { - bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") - resourceName := "aws_s3_bucket.bucket" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), - Providers: acctest.Providers, - CheckDestroy: testAccCheckBucketDestroy, - Steps: []resource.TestStep{ - { - Config: testAccBucketWithLifecycleConfig(bucketName), - Check: resource.ComposeTestCheckFunc( - testAccCheckBucketExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.id", "id1"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.prefix", "path1/"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.expiration.0.days", "365"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.expiration.0.date", ""), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.expiration.0.expired_object_delete_marker", "false"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.0.transition.*", map[string]string{ - "date": "", - "days": "30", - "storage_class": "STANDARD_IA", - }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.0.transition.*", map[string]string{ - "date": "", - "days": "60", - "storage_class": "INTELLIGENT_TIERING", - }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.0.transition.*", map[string]string{ - "date": "", - "days": "90", - "storage_class": "ONEZONE_IA", - }), - 
resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.0.transition.*", map[string]string{ - "date": "", - "days": "120", - "storage_class": "GLACIER", - }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.0.transition.*", map[string]string{ - "date": "", - "days": "210", - "storage_class": "DEEP_ARCHIVE", - }), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.1.id", "id2"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.1.prefix", "path2/"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.1.expiration.0.date", "2016-01-12"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.1.expiration.0.days", "0"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.1.expiration.0.expired_object_delete_marker", "false"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.2.id", "id3"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.2.prefix", "path3/"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.2.transition.*", map[string]string{ - "days": "0", - }), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.3.id", "id4"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.3.prefix", "path4/"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.3.tags.tagKey", "tagValue"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.3.tags.terraform", "hashicorp"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.4.id", "id5"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.4.tags.tagKey", "tagValue"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.4.tags.terraform", "hashicorp"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.4.transition.*", map[string]string{ - "days": "0", - "storage_class": "GLACIER", - }), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.5.id", "id6"), - 
resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.5.tags.tagKey", "tagValue"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.5.transition.*", map[string]string{ - "days": "0", - "storage_class": "GLACIER", - }), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, - }, - { - Config: testAccBucketWithVersioningLifecycleConfig(bucketName), - Check: resource.ComposeTestCheckFunc( - testAccCheckBucketExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.id", "id1"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.prefix", "path1/"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.enabled", "true"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.noncurrent_version_expiration.0.days", "365"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.0.noncurrent_version_transition.*", map[string]string{ - "days": "30", - "storage_class": "STANDARD_IA", - }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.0.noncurrent_version_transition.*", map[string]string{ - "days": "60", - "storage_class": "GLACIER", - }), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.1.id", "id2"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.1.prefix", "path2/"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.1.enabled", "false"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.1.noncurrent_version_expiration.0.days", "365"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.2.id", "id3"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.2.prefix", "path3/"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.2.noncurrent_version_transition.*", map[string]string{ - "days": "0", - "storage_class": "GLACIER", - }), - ), - }, - { 
- Config: testAccBucketConfig_Basic(bucketName), - Check: resource.ComposeTestCheckFunc( - testAccCheckBucketExists(resourceName), - ), - }, - }, - }) -} - -func TestAccS3Bucket_Manage_lifecycleExpireMarkerOnly(t *testing.T) { - bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") - resourceName := "aws_s3_bucket.bucket" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), - Providers: acctest.Providers, - CheckDestroy: testAccCheckBucketDestroy, - Steps: []resource.TestStep{ - { - Config: testAccBucketWithLifecycleExpireMarkerConfig(bucketName), - Check: resource.ComposeTestCheckFunc( - testAccCheckBucketExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.id", "id1"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.prefix", "path1/"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.expiration.0.days", "0"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.expiration.0.date", ""), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.expiration.0.expired_object_delete_marker", "true"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, - }, - { - Config: testAccBucketConfig_Basic(bucketName), - Check: resource.ComposeTestCheckFunc( - testAccCheckBucketExists(resourceName), - ), - }, - }, - }) -} - -// Reference: https://github.com/hashicorp/terraform-provider-aws/issues/11420 -func TestAccS3Bucket_Manage_lifecycleRuleExpirationEmptyBlock(t *testing.T) { - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_s3_bucket.bucket" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), - Providers: acctest.Providers, - CheckDestroy: testAccCheckBucketDestroy, - 
Steps: []resource.TestStep{ - { - Config: testAccBucketLifecycleRuleExpirationEmptyConfigurationBlockConfig(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckBucketExists(resourceName), - ), - }, - }, - }) -} - -// Reference: https://github.com/hashicorp/terraform-provider-aws/issues/15138 -func TestAccS3Bucket_Manage_lifecycleRuleAbortIncompleteMultipartUploadDaysNoExpiration(t *testing.T) { - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_s3_bucket.bucket" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), - Providers: acctest.Providers, - CheckDestroy: testAccCheckBucketDestroy, - Steps: []resource.TestStep{ - { - Config: testAccBucketLifecycleRuleAbortIncompleteMultipartUploadDaysConfig(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckBucketExists(resourceName), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, - }, - }, - }) -} - func TestAccS3Bucket_Replication_basic(t *testing.T) { rInt := sdkacctest.RandInt() alternateRegion := acctest.AlternateRegion() @@ -3950,217 +3748,6 @@ resource "aws_s3_bucket" "bucket" { `, bucketName) } -func testAccBucketWithLifecycleConfig(bucketName string) string { - return fmt.Sprintf(` -resource "aws_s3_bucket" "bucket" { - bucket = %[1]q - acl = "private" - - lifecycle_rule { - id = "id1" - prefix = "path1/" - enabled = true - - expiration { - days = 365 - } - - transition { - days = 30 - storage_class = "STANDARD_IA" - } - - transition { - days = 60 - storage_class = "INTELLIGENT_TIERING" - } - - transition { - days = 90 - storage_class = "ONEZONE_IA" - } - - transition { - days = 120 - storage_class = "GLACIER" - } - - transition { - days = 210 - storage_class = "DEEP_ARCHIVE" - } - } - - lifecycle_rule { - id = "id2" - prefix = "path2/" - enabled = true - - expiration 
{ - date = "2016-01-12" - } - } - - lifecycle_rule { - id = "id3" - prefix = "path3/" - enabled = true - - transition { - days = 0 - storage_class = "GLACIER" - } - } - - lifecycle_rule { - id = "id4" - prefix = "path4/" - enabled = true - - tags = { - "tagKey" = "tagValue" - "terraform" = "hashicorp" - } - - expiration { - date = "2016-01-12" - } - } - - lifecycle_rule { - id = "id5" - enabled = true - - tags = { - "tagKey" = "tagValue" - "terraform" = "hashicorp" - } - - transition { - days = 0 - storage_class = "GLACIER" - } - } - - lifecycle_rule { - id = "id6" - enabled = true - - tags = { - "tagKey" = "tagValue" - } - - transition { - days = 0 - storage_class = "GLACIER" - } - } -} -`, bucketName) -} - -func testAccBucketWithLifecycleExpireMarkerConfig(bucketName string) string { - return fmt.Sprintf(` -resource "aws_s3_bucket" "bucket" { - bucket = %[1]q - acl = "private" - - lifecycle_rule { - id = "id1" - prefix = "path1/" - enabled = true - - expiration { - expired_object_delete_marker = "true" - } - } -} -`, bucketName) -} - -func testAccBucketWithVersioningLifecycleConfig(bucketName string) string { - return fmt.Sprintf(` -resource "aws_s3_bucket" "bucket" { - bucket = %[1]q - acl = "private" - - versioning { - enabled = false - } - - lifecycle_rule { - id = "id1" - prefix = "path1/" - enabled = true - - noncurrent_version_expiration { - days = 365 - } - - noncurrent_version_transition { - days = 30 - storage_class = "STANDARD_IA" - } - - noncurrent_version_transition { - days = 60 - storage_class = "GLACIER" - } - } - - lifecycle_rule { - id = "id2" - prefix = "path2/" - enabled = false - - noncurrent_version_expiration { - days = 365 - } - } - - lifecycle_rule { - id = "id3" - prefix = "path3/" - enabled = true - - noncurrent_version_transition { - days = 0 - storage_class = "GLACIER" - } - } -} -`, bucketName) -} - -func testAccBucketLifecycleRuleExpirationEmptyConfigurationBlockConfig(rName string) string { - return fmt.Sprintf(` -resource 
"aws_s3_bucket" "bucket" { - bucket = %[1]q - - lifecycle_rule { - enabled = true - id = "id1" - - expiration {} - } -} -`, rName) -} - -func testAccBucketLifecycleRuleAbortIncompleteMultipartUploadDaysConfig(rName string) string { - return fmt.Sprintf(` -resource "aws_s3_bucket" "bucket" { - bucket = %[1]q - - lifecycle_rule { - abort_incomplete_multipart_upload_days = 7 - enabled = true - id = "id1" - } -} -`, rName) -} - func testAccBucketReplicationBasicConfig(randInt int) string { return acctest.ConfigAlternateRegionProvider() + fmt.Sprintf(` data "aws_partition" "current" {} diff --git a/internal/service/s3/validate.go b/internal/service/s3/validate.go deleted file mode 100644 index 50185c826f9..00000000000 --- a/internal/service/s3/validate.go +++ /dev/null @@ -1,17 +0,0 @@ -package s3 - -import ( - "fmt" - "time" -) - -func validBucketLifecycleTimestamp(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - _, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", value)) - if err != nil { - errors = append(errors, fmt.Errorf( - "%q cannot be parsed as RFC3339 Timestamp Format", value)) - } - - return -} diff --git a/internal/service/s3/validate_test.go b/internal/service/s3/validate_test.go deleted file mode 100644 index 847122d2b91..00000000000 --- a/internal/service/s3/validate_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package s3 - -import ( - "testing" -) - -func TestValidBucketLifecycleTimestamp(t *testing.T) { - validDates := []string{ - "2016-01-01", - "2006-01-02", - } - - for _, v := range validDates { - _, errors := validBucketLifecycleTimestamp(v, "date") - if len(errors) != 0 { - t.Fatalf("%q should be valid date: %q", v, errors) - } - } - - invalidDates := []string{ - "Jan 01 2016", - "20160101", - } - - for _, v := range invalidDates { - _, errors := validBucketLifecycleTimestamp(v, "date") - if len(errors) == 0 { - t.Fatalf("%q should be invalid date", v) - } - } -} diff --git a/internal/tags/tags.go 
b/internal/tags/tags.go index 0b25cd32433..252c4f5df57 100644 --- a/internal/tags/tags.go +++ b/internal/tags/tags.go @@ -23,6 +23,15 @@ func TagsSchemaComputed() *schema.Schema { } } +func TagsSchemaComputedDeprecated(message string) *schema.Schema { + return &schema.Schema{ + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Deprecated: message, + } +} + func TagsSchemaForceNew() *schema.Schema { return &schema.Schema{ Type: schema.TypeMap, diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown index 6e269c5c9b9..70c1e456a82 100644 --- a/website/docs/r/s3_bucket.html.markdown +++ b/website/docs/r/s3_bucket.html.markdown @@ -103,79 +103,6 @@ resource "aws_s3_bucket" "b" { } ``` -### Using object lifecycle - -```terraform -resource "aws_s3_bucket" "bucket" { - bucket = "my-bucket" - acl = "private" - - lifecycle_rule { - id = "log" - enabled = true - - prefix = "log/" - - tags = { - rule = "log" - autoclean = "true" - } - - transition { - days = 30 - storage_class = "STANDARD_IA" # or "ONEZONE_IA" - } - - transition { - days = 60 - storage_class = "GLACIER" - } - - expiration { - days = 90 - } - } - - lifecycle_rule { - id = "tmp" - prefix = "tmp/" - enabled = true - - expiration { - date = "2016-01-12" - } - } -} - -resource "aws_s3_bucket" "versioning_bucket" { - bucket = "my-versioning-bucket" - acl = "private" - - versioning { - enabled = true - } - - lifecycle_rule { - prefix = "config/" - enabled = true - - noncurrent_version_transition { - days = 30 - storage_class = "STANDARD_IA" - } - - noncurrent_version_transition { - days = 60 - storage_class = "GLACIER" - } - - noncurrent_version_expiration { - days = 90 - } - } -} -``` - ### Using replication configuration ~> **NOTE:** See the [`aws_s3_bucket_replication_configuration` resource](/docs/providers/aws/r/s3_bucket_replication_configuration.html) to support bi-directional replication configuration and additional features. 
@@ -363,7 +290,6 @@ The following arguments are supported: * `cors_rule` - (Optional) A rule of [Cross-Origin Resource Sharing](https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) (documented below). * `versioning` - (Optional) A state of [versioning](https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html) (documented below) * `logging` - (Optional) A settings of [bucket logging](https://docs.aws.amazon.com/AmazonS3/latest/UG/ManagingBucketLogging.html) (documented below). -* `lifecycle_rule` - (Optional) A configuration of [object lifecycle management](http://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) (documented below). * `acceleration_status` - (Optional) Sets the accelerate configuration of an existing bucket. Can be `Enabled` or `Suspended`. * `request_payer` - (Optional) Specifies who should bear the cost of Amazon S3 data transfer. Can be either `BucketOwner` or `Requester`. By default, the owner of the S3 bucket would incur @@ -401,41 +327,6 @@ The `logging` object supports the following: * `target_bucket` - (Required) The name of the bucket that will receive the log objects. * `target_prefix` - (Optional) To specify a key prefix for log objects. -The `lifecycle_rule` object supports the following: - -* `id` - (Optional) Unique identifier for the rule. Must be less than or equal to 255 characters in length. -* `prefix` - (Optional) Object key prefix identifying one or more objects to which the rule applies. -* `tags` - (Optional) Specifies object tags key and value. -* `enabled` - (Required) Specifies lifecycle rule status. -* `abort_incomplete_multipart_upload_days` (Optional) Specifies the number of days after initiating a multipart upload when the multipart upload must be completed. -* `expiration` - (Optional) Specifies a period in the object's expire (documented below). -* `transition` - (Optional) Specifies a period in the object's transitions (documented below). 
-* `noncurrent_version_expiration` - (Optional) Specifies when noncurrent object versions expire (documented below). -* `noncurrent_version_transition` - (Optional) Specifies when noncurrent object versions transitions (documented below). - -At least one of `abort_incomplete_multipart_upload_days`, `expiration`, `transition`, `noncurrent_version_expiration`, `noncurrent_version_transition` must be specified. - -The `expiration` object supports the following - -* `date` (Optional) Specifies the date after which you want the corresponding action to take effect. -* `days` (Optional) Specifies the number of days after object creation when the specific rule action takes effect. -* `expired_object_delete_marker` (Optional) On a versioned bucket (versioning-enabled or versioning-suspended bucket), you can add this element in the lifecycle configuration to direct Amazon S3 to delete expired object delete markers. This cannot be specified with Days or Date in a Lifecycle Expiration Policy. - -The `transition` object supports the following - -* `date` (Optional) Specifies the date after which you want the corresponding action to take effect. -* `days` (Optional) Specifies the number of days after object creation when the specific rule action takes effect. -* `storage_class` (Required) Specifies the Amazon S3 [storage class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_Transition.html#AmazonS3-Type-Transition-StorageClass) to which you want the object to transition. - -The `noncurrent_version_expiration` object supports the following - -* `days` (Required) Specifies the number of days noncurrent object versions expire. - -The `noncurrent_version_transition` object supports the following - -* `days` (Required) Specifies the number of days noncurrent object versions transition. 
-* `storage_class` (Required) Specifies the Amazon S3 [storage class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_Transition.html#AmazonS3-Type-Transition-StorageClass) to which you want the object to transition. - The `replication_configuration` object supports the following: ~> **NOTE:** See the [`aws_s3_bucket_replication_configuration` resource documentation](/docs/providers/aws/r/s3_bucket_replication_configuration.html) to avoid conflicts. Replication configuration can only be defined in one resource not both. When using the independent replication configuration resource the following lifecycle rule is needed on the `aws_s3_bucket` resource. @@ -559,6 +450,25 @@ In addition to all arguments above, the following attributes are exported: * `bucket_domain_name` - The bucket domain name. Will be of format `bucketname.s3.amazonaws.com`. * `bucket_regional_domain_name` - The bucket region-specific domain name. The bucket domain name including the region name, please refer [here](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) for format. Note: The AWS CloudFront allows specifying S3 region-specific endpoint when creating S3 origin, it will prevent [redirect issues](https://forums.aws.amazon.com/thread.jspa?threadID=216814) from CloudFront to S3 Origin URL. * `hosted_zone_id` - The [Route 53 Hosted Zone ID](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints) for this bucket's region. +* `lifecycle_rule` - A configuration of [object lifecycle management](http://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). + * `id` - Unique identifier for the rule. + * `prefix` - Object key prefix identifying one or more objects to which the rule applies. + * `tags` - Object tags key and value. + * `enabled` - Lifecycle rule status. + * `abort_incomplete_multipart_upload_days` - Number of days after initiating a multipart upload when the multipart upload must be completed. 
+ * `expiration` - The expiration for the lifecycle of the object in the form of date, days, and whether the object has a delete marker. + * `date` - Indicates at what date the object is to be moved or deleted. + * `days` - Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer. + * `expired_object_delete_marker` - Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. + * `transition` - Specifies when an Amazon S3 object transitions to a specified storage class. + * `date` - The date after which you want the corresponding action to take effect. + * `days` - The number of days after object creation when the specific rule action takes effect. + * `storage_class` - The Amazon S3 [storage class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_Transition.html#AmazonS3-Type-Transition-StorageClass) an object will transition to. + * `noncurrent_version_expiration` - When noncurrent object versions expire. + * `days` - The number of days noncurrent object versions expire. + * `noncurrent_version_transition` - When noncurrent object versions transition. + * `days` - The number of days noncurrent object versions transition. + * `storage_class` - The Amazon S3 [storage class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_Transition.html#AmazonS3-Type-Transition-StorageClass) an object will transition to. * `region` - The AWS region this bucket resides in. * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). * `website_endpoint` - The website endpoint, if the bucket is configured with a website. If not, this will be an empty string.