diff --git a/.changelog/23703.txt b/.changelog/23703.txt new file mode 100644 index 000000000000..636fa01b9e8d --- /dev/null +++ b/.changelog/23703.txt @@ -0,0 +1,11 @@ +```release-note:bug +resource/aws_s3_bucket_replication_configuration: Set `rule.id` as Computed to prevent drift when the value is not configured +``` + +```release-note:bug +resource/aws_s3_bucket_replication_configuration: Change `rule` configuration block to list instead of set +``` + +```release-note:note +resource/aws_s3_bucket_replication_configuration: The `rule.prefix` parameter has been deprecated. Use the `rule.filter` parameter instead. +``` \ No newline at end of file diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 9e1ba8d0062b..6730d0b37a9f 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -2029,167 +2029,6 @@ func grantHash(v interface{}) int { return create.StringHashcode(buf.String()) } -func rulesHash(v interface{}) int { - var buf bytes.Buffer - m, ok := v.(map[string]interface{}) - - if !ok { - return 0 - } - - if v, ok := m["id"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - if v, ok := m["prefix"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - if v, ok := m["status"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - if v, ok := m["destination"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - buf.WriteString(fmt.Sprintf("%d-", destinationHash(v[0]))) - } - if v, ok := m["source_selection_criteria"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - buf.WriteString(fmt.Sprintf("%d-", sourceSelectionCriteriaHash(v[0]))) - } - if v, ok := m["priority"]; ok { - buf.WriteString(fmt.Sprintf("%d-", v.(int))) - } - if v, ok := m["filter"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - buf.WriteString(fmt.Sprintf("%d-", replicationRuleFilterHash(v[0]))) - - if v, ok := m["delete_marker_replication_status"]; ok && v.(string) == s3.DeleteMarkerReplicationStatusEnabled
{ - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - } - return create.StringHashcode(buf.String()) -} - -func replicationRuleFilterHash(v interface{}) int { - var buf bytes.Buffer - m, ok := v.(map[string]interface{}) - - if !ok { - return 0 - } - - if v, ok := m["prefix"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - if v, ok := m["tags"]; ok { - buf.WriteString(fmt.Sprintf("%d-", tftags.New(v).Hash())) - } - return create.StringHashcode(buf.String()) -} - -func destinationHash(v interface{}) int { - var buf bytes.Buffer - m, ok := v.(map[string]interface{}) - - if !ok { - return 0 - } - - if v, ok := m["bucket"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - if v, ok := m["storage_class"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - if v, ok := m["replica_kms_key_id"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - if v, ok := m["account"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - if v, ok := m["access_control_translation"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - buf.WriteString(fmt.Sprintf("%d-", accessControlTranslationHash(v[0]))) - } - if v, ok := m["replication_time"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - buf.WriteString(fmt.Sprintf("%d-", replicationTimeHash(v[0]))) - } - if v, ok := m["metrics"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - buf.WriteString(fmt.Sprintf("%d-", metricsHash(v[0]))) - } - return create.StringHashcode(buf.String()) -} - -func accessControlTranslationHash(v interface{}) int { - var buf bytes.Buffer - m, ok := v.(map[string]interface{}) - - if !ok { - return 0 - } - - if v, ok := m["owner"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - return create.StringHashcode(buf.String()) -} - -func metricsHash(v interface{}) int { - var buf bytes.Buffer - m, ok := v.(map[string]interface{}) - - if !ok { - return 0 - } - - if v, ok := m["minutes"]; ok { - buf.WriteString(fmt.Sprintf("%d-", v.(int))) - } - 
if v, ok := m["status"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - return create.StringHashcode(buf.String()) -} - -func replicationTimeHash(v interface{}) int { - var buf bytes.Buffer - m, ok := v.(map[string]interface{}) - - if !ok { - return 0 - } - - if v, ok := m["minutes"]; ok { - buf.WriteString(fmt.Sprintf("%d-", v.(int))) - } - if v, ok := m["status"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - return create.StringHashcode(buf.String()) -} - -func sourceSelectionCriteriaHash(v interface{}) int { - var buf bytes.Buffer - m, ok := v.(map[string]interface{}) - - if !ok { - return 0 - } - - if v, ok := m["sse_kms_encrypted_objects"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - buf.WriteString(fmt.Sprintf("%d-", sourceSseKmsObjectsHash(v[0]))) - } - return create.StringHashcode(buf.String()) -} - -func sourceSseKmsObjectsHash(v interface{}) int { - var buf bytes.Buffer - m, ok := v.(map[string]interface{}) - - if !ok { - return 0 - } - - if v, ok := m["enabled"]; ok { - buf.WriteString(fmt.Sprintf("%t-", v.(bool))) - } - return create.StringHashcode(buf.String()) -} - type S3Website struct { Endpoint, Domain string } diff --git a/internal/service/s3/bucket_replication_configuration.go b/internal/service/s3/bucket_replication_configuration.go index 94ccdd71fc67..14434ae9a438 100644 --- a/internal/service/s3/bucket_replication_configuration.go +++ b/internal/service/s3/bucket_replication_configuration.go @@ -44,9 +44,8 @@ func ResourceBucketReplicationConfiguration() *schema.Resource { Sensitive: true, }, "rule": { - Type: schema.TypeSet, + Type: schema.TypeList, Required: true, - Set: rulesHash, MaxItems: 1000, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -241,12 +240,14 @@ func ResourceBucketReplicationConfiguration() *schema.Resource { "id": { Type: schema.TypeString, Optional: true, + Computed: true, ValidateFunc: validation.StringLenBetween(0, 255), }, "prefix": { Type: schema.TypeString, 
Optional: true, ValidateFunc: validation.StringLenBetween(0, 1024), + Deprecated: "Use filter instead", }, "priority": { Type: schema.TypeInt, @@ -308,7 +309,7 @@ func resourceBucketReplicationConfigurationCreate(d *schema.ResourceData, meta i rc := &s3.ReplicationConfiguration{ Role: aws.String(d.Get("role").(string)), - Rules: ExpandReplicationRules(d.Get("rule").(*schema.Set).List()), + Rules: ExpandReplicationRules(d.Get("rule").([]interface{})), } input := &s3.PutBucketReplicationInput{ @@ -376,7 +377,7 @@ func resourceBucketReplicationConfigurationRead(d *schema.ResourceData, meta int d.Set("bucket", d.Id()) d.Set("role", r.Role) - if err := d.Set("rule", schema.NewSet(rulesHash, FlattenReplicationRules(r.Rules))); err != nil { + if err := d.Set("rule", FlattenReplicationRules(r.Rules)); err != nil { return fmt.Errorf("error setting rule: %w", err) } @@ -388,7 +389,7 @@ func resourceBucketReplicationConfigurationUpdate(d *schema.ResourceData, meta i rc := &s3.ReplicationConfiguration{ Role: aws.String(d.Get("role").(string)), - Rules: ExpandReplicationRules(d.Get("rule").(*schema.Set).List()), + Rules: ExpandReplicationRules(d.Get("rule").([]interface{})), } input := &s3.PutBucketReplicationInput{ diff --git a/internal/service/s3/bucket_replication_configuration_test.go b/internal/service/s3/bucket_replication_configuration_test.go index 7655120f333f..076487008647 100644 --- a/internal/service/s3/bucket_replication_configuration_test.go +++ b/internal/service/s3/bucket_replication_configuration_test.go @@ -540,6 +540,46 @@ func TestAccS3BucketReplicationConfiguration_replicaModifications(t *testing.T) }) } +// TestAccS3BucketReplicationConfiguration_withoutId ensures a configuration with a Computed +// rule.id does not result in a non-empty plan +// Reference: https://github.com/hashicorp/terraform-provider-aws/issues/23690 +func TestAccS3BucketReplicationConfiguration_withoutId(t *testing.T) { + resourceName := "aws_s3_bucket_replication_configuration.test" 
+ rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dstBucketResourceName := "aws_s3_bucket.destination" + iamRoleResourceName := "aws_iam_role.test" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroy, &providers), + Steps: []resource.TestStep{ + { + Config: testAccBucketReplicationConfiguration_prefix_withoutIdConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "rule.0.id"), + resource.TestCheckResourceAttr(resourceName, "rule.0.prefix", "foo"), + resource.TestCheckResourceAttr(resourceName, "rule.0.status", s3.ReplicationRuleStatusEnabled), + resource.TestCheckResourceAttr(resourceName, "rule.0.destination.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "rule.0.destination.0.bucket", dstBucketResourceName, "arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + // StorageClass issue: https://github.com/hashicorp/terraform/issues/10909 func TestAccS3BucketReplicationConfiguration_withoutStorageClass(t *testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -838,6 +878,10 @@ func TestAccS3BucketReplicationConfiguration_filter_emptyPrefix(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + // The "rule" parameter as a TypeList will have a nil value + // if prefix was 
specified as an empty string, which was used as workaround + // when the parameter was a TypeSet + ImportStateVerifyIgnore: []string{"rule.0.filter.0.prefix"}, }, }, }) @@ -959,6 +1003,47 @@ func TestAccS3BucketReplicationConfiguration_filter_andOperator(t *testing.T) { }) } +// TestAccS3BucketReplicationConfiguration_filter_withoutId ensures a configuration with a Computed +// rule.id does not result in a non-empty plan. +// Reference: https://github.com/hashicorp/terraform-provider-aws/issues/23690 +func TestAccS3BucketReplicationConfiguration_filter_withoutId(t *testing.T) { + resourceName := "aws_s3_bucket_replication_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dstBucketResourceName := "aws_s3_bucket.destination" + iamRoleResourceName := "aws_iam_role.test" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroy, &providers), + Steps: []resource.TestStep{ + { + Config: testAccBucketReplicationConfiguration_filter_withoutIdConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "rule.0.id"), + resource.TestCheckResourceAttr(resourceName, "rule.0.filter.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rule.0.status", s3.ReplicationRuleStatusEnabled), + resource.TestCheckResourceAttr(resourceName, "rule.0.delete_marker_replication.#", "1"), + 
resource.TestCheckResourceAttr(resourceName, "rule.0.destination.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "rule.0.destination.0.bucket", dstBucketResourceName, "arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/21961 func TestAccS3BucketReplicationConfiguration_withoutPrefix(t *testing.T) { resourceName := "aws_s3_bucket_replication_configuration.test" @@ -1119,6 +1204,57 @@ resource "aws_s3_bucket_replication_configuration" "test" { }`, storageClass) } +func testAccBucketReplicationConfiguration_prefix_withoutIdConfig(rName string) string { + return acctest.ConfigCompose( + testAccBucketReplicationConfigurationBase(rName), ` +resource "aws_s3_bucket_replication_configuration" "test" { + depends_on = [ + aws_s3_bucket_versioning.source, + aws_s3_bucket_versioning.destination + ] + + bucket = aws_s3_bucket.source.id + role = aws_iam_role.test.arn + + rule { + prefix = "foo" + status = "Enabled" + + destination { + bucket = aws_s3_bucket.destination.arn + } + } +}`) +} + +func testAccBucketReplicationConfiguration_filter_withoutIdConfig(rName string) string { + return acctest.ConfigCompose( + testAccBucketReplicationConfigurationBase(rName), ` +resource "aws_s3_bucket_replication_configuration" "test" { + depends_on = [ + aws_s3_bucket_versioning.source, + aws_s3_bucket_versioning.destination + ] + + bucket = aws_s3_bucket.source.id + role = aws_iam_role.test.arn + + rule { + filter {} + + status = "Enabled" + + delete_marker_replication { + status = "Disabled" + } + + destination { + bucket = aws_s3_bucket.destination.arn + } + } +}`) +} + func testAccBucketReplicationConfigurationRTC(rName string) string { return acctest.ConfigCompose( testAccBucketReplicationConfigurationBase(rName), diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown 
b/website/docs/r/s3_bucket_replication_configuration.html.markdown index af366a92f414..1ffea4290d09 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -110,6 +110,8 @@ resource "aws_s3_bucket" "source" { } resource "aws_s3_bucket_acl" "source_bucket_acl" { + provider = aws.central + bucket = aws_s3_bucket.source.id acl = "private" } @@ -124,6 +126,7 @@ resource "aws_s3_bucket_versioning" "source" { } resource "aws_s3_bucket_replication_configuration" "replication" { + provider = aws.central # Must have bucket versioning enabled first depends_on = [aws_s3_bucket_versioning.source] @@ -131,8 +134,12 @@ resource "aws_s3_bucket_replication_configuration" "replication" { bucket = aws_s3_bucket.source.id rule { - id = "foobar" - prefix = "foo" + id = "foobar" + + filter { + prefix = "foo" + } + status = "Enabled" destination { @@ -160,12 +167,12 @@ resource "aws_s3_bucket_versioning" "east" { } resource "aws_s3_bucket" "west" { - provider = west + provider = aws.west bucket = "tf-test-bucket-west-12345" } resource "aws_s3_bucket_versioning" "west" { - provider = west + provider = aws.west bucket = aws_s3_bucket.west.id versioning_configuration { @@ -181,8 +188,12 @@ resource "aws_s3_bucket_replication_configuration" "east_to_west" { bucket = aws_s3_bucket.east.id rule { - id = "foobar" - prefix = "foo" + id = "foobar" + + filter { + prefix = "foo" + } + status = "Enabled" destination { @@ -193,6 +204,7 @@ resource "aws_s3_bucket_replication_configuration" "east_to_west" { } resource "aws_s3_bucket_replication_configuration" "west_to_east" { + provider = aws.west # Must have bucket versioning enabled first depends_on = [aws_s3_bucket_versioning.west] @@ -200,8 +212,12 @@ resource "aws_s3_bucket_replication_configuration" "west_to_east" { bucket = aws_s3_bucket.west.id rule { - id = "foobar" - prefix = "foo" + id = "foobar" + + filter { + prefix = "foo" + } + status = 
"Enabled" destination { @@ -218,7 +234,7 @@ The following arguments are supported: * `bucket` - (Required) The name of the source S3 bucket you want Amazon S3 to monitor. * `role` - (Required) The ARN of the IAM role for Amazon S3 to assume when replicating the objects. -* `rule` - (Required) Set of configuration blocks describing the rules managing the replication [documented below](#rule). +* `rule` - (Required) List of configuration blocks describing the rules managing the replication [documented below](#rule). * `token` - (Optional) A token to allow replication to be enabled on an Object Lock-enabled bucket. You must contact AWS support for the bucket's "Object Lock token". For more details, see [Using S3 Object Lock with replication](https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-managing.html#object-lock-managing-replication). @@ -235,7 +251,7 @@ The `rule` configuration block supports the following arguments: * `existing_object_replication` - (Optional) Replicate existing objects in the source bucket according to the rule configurations [documented below](#existing_object_replication). * `filter` - (Optional, Conflicts with `prefix`) Filter that identifies subset of objects to which the replication rule applies [documented below](#filter). If not specified, the `rule` will default to using `prefix`. * `id` - (Optional) Unique identifier for the rule. Must be less than or equal to 255 characters in length. -* `prefix` - (Optional, Conflicts with `filter`) Object key name prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. Defaults to an empty string (`""`) if `filter` is not specified. +* `prefix` - (Optional, Conflicts with `filter`, **Deprecated**) Object key name prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. Defaults to an empty string (`""`) if `filter` is not specified. 
* `priority` - (Optional) The priority associated with the rule. Priority should only be set if `filter` is configured. If not provided, defaults to `0`. Priority must be unique between multiple rules. * `source_selection_criteria` - (Optional) Specifies special object selection criteria [documented below](#source_selection_criteria). * `status` - (Required) The status of the rule. Either `"Enabled"` or `"Disabled"`. The rule is ignored if status is not "Enabled".