Skip to content

Commit

Permalink
Merge pull request #23703 from hashicorp/b-s3-bucket-replication-conf…
Browse files Browse the repository at this point in the history
…iguration-to-typelist

r/s3_bucket_replication_configuration: update `rule` to a TypeList and mark `rule.id` as Computed and `rule.prefix` as Deprecated
  • Loading branch information
anGie44 authored Mar 17, 2022
2 parents ab23b65 + 6c9867e commit 0896842
Show file tree
Hide file tree
Showing 5 changed files with 179 additions and 176 deletions.
11 changes: 11 additions & 0 deletions .changelog/23703.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
```release-note:bug
resource/aws_s3_bucket_replication_configuration: Set `rule.id` as Computed to prevent drift when the value is not configured
```

```release-note:bug
resource/aws_s3_bucket_replication_configuration: Change `rule` configuration block to list instead of set
```

```release-note:note
resource/aws_s3_bucket_replication_configuration: The `rule.prefix` parameter has been deprecated. Use the `rule.filter` parameter instead.
```
161 changes: 0 additions & 161 deletions internal/service/s3/bucket.go
Original file line number Diff line number Diff line change
Expand Up @@ -2029,167 +2029,6 @@ func grantHash(v interface{}) int {
return create.StringHashcode(buf.String())
}

// rulesHash computes the schema.Set element hash for a replication
// configuration "rules" block. Identifying fields are concatenated into a
// buffer (each followed by "-") and the resulting string is hashed, so two
// rules with identical settings land on the same set element.
func rulesHash(v interface{}) int {
	var buf bytes.Buffer
	m, ok := v.(map[string]interface{})

	// Anything other than a map element (e.g. nil) hashes to zero.
	if !ok {
		return 0
	}

	if v, ok := m["id"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}
	if v, ok := m["prefix"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}
	if v, ok := m["status"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}
	// Nested single-element blocks are folded in via their own hash helpers.
	if v, ok := m["destination"].([]interface{}); ok && len(v) > 0 && v[0] != nil {
		buf.WriteString(fmt.Sprintf("%d-", destinationHash(v[0])))
	}
	if v, ok := m["source_selection_criteria"].([]interface{}); ok && len(v) > 0 && v[0] != nil {
		buf.WriteString(fmt.Sprintf("%d-", sourceSelectionCriteriaHash(v[0])))
	}
	if v, ok := m["priority"]; ok {
		buf.WriteString(fmt.Sprintf("%d-", v.(int)))
	}
	if v, ok := m["filter"].([]interface{}); ok && len(v) > 0 && v[0] != nil {
		buf.WriteString(fmt.Sprintf("%d-", replicationRuleFilterHash(v[0])))

		// NOTE: delete_marker_replication_status contributes to the hash
		// only when a non-empty "filter" block is present (this check is
		// nested inside the filter branch) and only when its value is
		// DeleteMarkerReplicationStatusEnabled.
		if v, ok := m["delete_marker_replication_status"]; ok && v.(string) == s3.DeleteMarkerReplicationStatusEnabled {
			buf.WriteString(fmt.Sprintf("%s-", v.(string)))
		}
	}
	return create.StringHashcode(buf.String())
}

// replicationRuleFilterHash computes a hash for a replication rule "filter"
// configuration block by combining its prefix and tags.
func replicationRuleFilterHash(v interface{}) int {
	m, ok := v.(map[string]interface{})
	if !ok {
		// Non-map input (e.g. a nil element) hashes to zero.
		return 0
	}

	var buf bytes.Buffer
	if prefix, ok := m["prefix"]; ok {
		fmt.Fprintf(&buf, "%s-", prefix.(string))
	}
	if tags, ok := m["tags"]; ok {
		fmt.Fprintf(&buf, "%d-", tftags.New(tags).Hash())
	}
	return create.StringHashcode(buf.String())
}

// destinationHash computes a hash for a replication rule "destination"
// configuration block from its scalar fields and nested sub-blocks.
func destinationHash(v interface{}) int {
	m, ok := v.(map[string]interface{})
	if !ok {
		// Non-map input (e.g. a nil element) hashes to zero.
		return 0
	}

	var buf bytes.Buffer
	if bucket, ok := m["bucket"]; ok {
		fmt.Fprintf(&buf, "%s-", bucket.(string))
	}
	if storageClass, ok := m["storage_class"]; ok {
		fmt.Fprintf(&buf, "%s-", storageClass.(string))
	}
	if kmsKeyID, ok := m["replica_kms_key_id"]; ok {
		fmt.Fprintf(&buf, "%s-", kmsKeyID.(string))
	}
	if account, ok := m["account"]; ok {
		fmt.Fprintf(&buf, "%s-", account.(string))
	}
	// Nested single-element blocks contribute their own sub-hashes.
	if l, ok := m["access_control_translation"].([]interface{}); ok && len(l) > 0 && l[0] != nil {
		fmt.Fprintf(&buf, "%d-", accessControlTranslationHash(l[0]))
	}
	if l, ok := m["replication_time"].([]interface{}); ok && len(l) > 0 && l[0] != nil {
		fmt.Fprintf(&buf, "%d-", replicationTimeHash(l[0]))
	}
	if l, ok := m["metrics"].([]interface{}); ok && len(l) > 0 && l[0] != nil {
		fmt.Fprintf(&buf, "%d-", metricsHash(l[0]))
	}
	return create.StringHashcode(buf.String())
}

// accessControlTranslationHash computes a hash for a destination
// "access_control_translation" configuration block from its owner field.
func accessControlTranslationHash(v interface{}) int {
	m, ok := v.(map[string]interface{})
	if !ok {
		// Non-map input (e.g. a nil element) hashes to zero.
		return 0
	}

	var buf bytes.Buffer
	if owner, ok := m["owner"]; ok {
		fmt.Fprintf(&buf, "%s-", owner.(string))
	}
	return create.StringHashcode(buf.String())
}

// metricsHash computes a hash for a destination "metrics" configuration
// block from its minutes and status fields.
func metricsHash(v interface{}) int {
	m, ok := v.(map[string]interface{})
	if !ok {
		// Non-map input (e.g. a nil element) hashes to zero.
		return 0
	}

	var buf bytes.Buffer
	if minutes, ok := m["minutes"]; ok {
		fmt.Fprintf(&buf, "%d-", minutes.(int))
	}
	if status, ok := m["status"]; ok {
		fmt.Fprintf(&buf, "%s-", status.(string))
	}
	return create.StringHashcode(buf.String())
}

// replicationTimeHash computes a hash for a destination "replication_time"
// configuration block from its minutes and status fields.
func replicationTimeHash(v interface{}) int {
	m, ok := v.(map[string]interface{})
	if !ok {
		// Non-map input (e.g. a nil element) hashes to zero.
		return 0
	}

	var buf bytes.Buffer
	if minutes, ok := m["minutes"]; ok {
		fmt.Fprintf(&buf, "%d-", minutes.(int))
	}
	if status, ok := m["status"]; ok {
		fmt.Fprintf(&buf, "%s-", status.(string))
	}
	return create.StringHashcode(buf.String())
}

// sourceSelectionCriteriaHash computes a hash for a rule's
// "source_selection_criteria" configuration block.
func sourceSelectionCriteriaHash(v interface{}) int {
	m, ok := v.(map[string]interface{})
	if !ok {
		// Non-map input (e.g. a nil element) hashes to zero.
		return 0
	}

	var buf bytes.Buffer
	// The single-element sse_kms_encrypted_objects block contributes its
	// own sub-hash.
	if l, ok := m["sse_kms_encrypted_objects"].([]interface{}); ok && len(l) > 0 && l[0] != nil {
		fmt.Fprintf(&buf, "%d-", sourceSseKmsObjectsHash(l[0]))
	}
	return create.StringHashcode(buf.String())
}

// sourceSseKmsObjectsHash computes a hash for an "sse_kms_encrypted_objects"
// configuration block from its enabled flag.
func sourceSseKmsObjectsHash(v interface{}) int {
	m, ok := v.(map[string]interface{})
	if !ok {
		// Non-map input (e.g. a nil element) hashes to zero.
		return 0
	}

	var buf bytes.Buffer
	if enabled, ok := m["enabled"]; ok {
		fmt.Fprintf(&buf, "%t-", enabled.(bool))
	}
	return create.StringHashcode(buf.String())
}

// S3Website holds the website endpoint and domain for an S3 bucket
// configured as a static website.
type S3Website struct {
	Endpoint, Domain string
}
Expand Down
11 changes: 6 additions & 5 deletions internal/service/s3/bucket_replication_configuration.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,9 +44,8 @@ func ResourceBucketReplicationConfiguration() *schema.Resource {
Sensitive: true,
},
"rule": {
Type: schema.TypeSet,
Type: schema.TypeList,
Required: true,
Set: rulesHash,
MaxItems: 1000,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
Expand Down Expand Up @@ -241,12 +240,14 @@ func ResourceBucketReplicationConfiguration() *schema.Resource {
"id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ValidateFunc: validation.StringLenBetween(0, 255),
},
"prefix": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validation.StringLenBetween(0, 1024),
Deprecated: "Use filter instead",
},
"priority": {
Type: schema.TypeInt,
Expand Down Expand Up @@ -308,7 +309,7 @@ func resourceBucketReplicationConfigurationCreate(d *schema.ResourceData, meta i

rc := &s3.ReplicationConfiguration{
Role: aws.String(d.Get("role").(string)),
Rules: ExpandReplicationRules(d.Get("rule").(*schema.Set).List()),
Rules: ExpandReplicationRules(d.Get("rule").([]interface{})),
}

input := &s3.PutBucketReplicationInput{
Expand Down Expand Up @@ -376,7 +377,7 @@ func resourceBucketReplicationConfigurationRead(d *schema.ResourceData, meta int

d.Set("bucket", d.Id())
d.Set("role", r.Role)
if err := d.Set("rule", schema.NewSet(rulesHash, FlattenReplicationRules(r.Rules))); err != nil {
if err := d.Set("rule", FlattenReplicationRules(r.Rules)); err != nil {
return fmt.Errorf("error setting rule: %w", err)
}

Expand All @@ -388,7 +389,7 @@ func resourceBucketReplicationConfigurationUpdate(d *schema.ResourceData, meta i

rc := &s3.ReplicationConfiguration{
Role: aws.String(d.Get("role").(string)),
Rules: ExpandReplicationRules(d.Get("rule").(*schema.Set).List()),
Rules: ExpandReplicationRules(d.Get("rule").([]interface{})),
}

input := &s3.PutBucketReplicationInput{
Expand Down
136 changes: 136 additions & 0 deletions internal/service/s3/bucket_replication_configuration_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -540,6 +540,46 @@ func TestAccS3BucketReplicationConfiguration_replicaModifications(t *testing.T)
})
}

// TestAccS3BucketReplicationConfiguration_withoutId ensures a configuration with a Computed
// rule.id does not result in a non-empty plan
// Reference: https://github.com/hashicorp/terraform-provider-aws/issues/23690
// TestAccS3BucketReplicationConfiguration_withoutId ensures a configuration
// that omits rule.id (now Computed) does not result in a non-empty plan:
// the id assigned on create must survive refresh and import verification.
// Reference: https://github.com/hashicorp/terraform-provider-aws/issues/23690
func TestAccS3BucketReplicationConfiguration_withoutId(t *testing.T) {
	resourceName := "aws_s3_bucket_replication_configuration.test"
	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
	dstBucketResourceName := "aws_s3_bucket.destination"
	iamRoleResourceName := "aws_iam_role.test"

	// Keep a handle on every provider instance that gets initialized so
	// CheckDestroy can be run against each of them.
	var providers []*schema.Provider

	resource.ParallelTest(t, resource.TestCase{
		PreCheck:          func() { acctest.PreCheck(t) },
		ErrorCheck:        acctest.ErrorCheck(t, s3.EndpointsID),
		ProviderFactories: acctest.FactoriesAlternate(&providers),
		CheckDestroy:      acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroy, &providers),
		Steps: []resource.TestStep{
			{
				Config: testAccBucketReplicationConfiguration_prefix_withoutIdConfig(rName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckBucketReplicationConfigurationExists(resourceName),
					resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"),
					resource.TestCheckResourceAttr(resourceName, "rule.#", "1"),
					// rule.id is expected to be set in state even though the
					// configuration omits it.
					resource.TestCheckResourceAttrSet(resourceName, "rule.0.id"),
					resource.TestCheckResourceAttr(resourceName, "rule.0.prefix", "foo"),
					resource.TestCheckResourceAttr(resourceName, "rule.0.status", s3.ReplicationRuleStatusEnabled),
					resource.TestCheckResourceAttr(resourceName, "rule.0.destination.#", "1"),
					resource.TestCheckResourceAttrPair(resourceName, "rule.0.destination.0.bucket", dstBucketResourceName, "arn"),
				),
			},
			{
				ResourceName:      resourceName,
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}

// StorageClass issue: https://github.com/hashicorp/terraform/issues/10909
func TestAccS3BucketReplicationConfiguration_withoutStorageClass(t *testing.T) {
rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
Expand Down Expand Up @@ -838,6 +878,10 @@ func TestAccS3BucketReplicationConfiguration_filter_emptyPrefix(t *testing.T) {
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
// The "rule" parameter as a TypeList will have a nil value
// if prefix was specified as an empty string, which was used as a
// workaround when the parameter was a TypeSet
ImportStateVerifyIgnore: []string{"rule.0.filter.0.prefix"},
},
},
})
Expand Down Expand Up @@ -959,6 +1003,47 @@ func TestAccS3BucketReplicationConfiguration_filter_andOperator(t *testing.T) {
})
}

// TestAccS3BucketReplicationConfiguration_filter_withoutId ensures a configuration with a Computed
// rule.id does not result in a non-empty plan.
// Reference: https://github.com/hashicorp/terraform-provider-aws/issues/23690
func TestAccS3BucketReplicationConfiguration_filter_withoutId(t *testing.T) {
resourceName := "aws_s3_bucket_replication_configuration.test"
rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
dstBucketResourceName := "aws_s3_bucket.destination"
iamRoleResourceName := "aws_iam_role.test"

// record the initialized providers so that we can use them to check for the instances in each region
var providers []*schema.Provider

resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { acctest.PreCheck(t) },
ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID),
ProviderFactories: acctest.FactoriesAlternate(&providers),
CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroy, &providers),
Steps: []resource.TestStep{
{
Config: testAccBucketReplicationConfiguration_filter_withoutIdConfig(rName),
Check: resource.ComposeTestCheckFunc(
testAccCheckBucketReplicationConfigurationExists(resourceName),
resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"),
resource.TestCheckResourceAttr(resourceName, "rule.#", "1"),
resource.TestCheckResourceAttrSet(resourceName, "rule.0.id"),
resource.TestCheckResourceAttr(resourceName, "rule.0.filter.#", "1"),
resource.TestCheckResourceAttr(resourceName, "rule.0.status", s3.ReplicationRuleStatusEnabled),
resource.TestCheckResourceAttr(resourceName, "rule.0.delete_marker_replication.#", "1"),
resource.TestCheckResourceAttr(resourceName, "rule.0.destination.#", "1"),
resource.TestCheckResourceAttrPair(resourceName, "rule.0.destination.0.bucket", dstBucketResourceName, "arn"),
),
},
{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
},
},
})
}

// Reference: https://github.com/hashicorp/terraform-provider-aws/issues/21961
func TestAccS3BucketReplicationConfiguration_withoutPrefix(t *testing.T) {
resourceName := "aws_s3_bucket_replication_configuration.test"
Expand Down Expand Up @@ -1119,6 +1204,57 @@ resource "aws_s3_bucket_replication_configuration" "test" {
}`, storageClass)
}

// testAccBucketReplicationConfiguration_prefix_withoutIdConfig returns a
// configuration whose single replication rule sets prefix and status but
// deliberately omits rule.id, exercising the Computed id behavior.
func testAccBucketReplicationConfiguration_prefix_withoutIdConfig(rName string) string {
	return acctest.ConfigCompose(
		testAccBucketReplicationConfigurationBase(rName), `
resource "aws_s3_bucket_replication_configuration" "test" {
depends_on = [
aws_s3_bucket_versioning.source,
aws_s3_bucket_versioning.destination
]
bucket = aws_s3_bucket.source.id
role   = aws_iam_role.test.arn
rule {
prefix = "foo"
status = "Enabled"
destination {
bucket = aws_s3_bucket.destination.arn
}
}
}`)
}

// testAccBucketReplicationConfiguration_filter_withoutIdConfig returns a
// configuration whose single replication rule uses an empty filter block,
// a delete_marker_replication block, and deliberately omits rule.id,
// exercising the Computed id behavior.
func testAccBucketReplicationConfiguration_filter_withoutIdConfig(rName string) string {
	return acctest.ConfigCompose(
		testAccBucketReplicationConfigurationBase(rName), `
resource "aws_s3_bucket_replication_configuration" "test" {
depends_on = [
aws_s3_bucket_versioning.source,
aws_s3_bucket_versioning.destination
]
bucket = aws_s3_bucket.source.id
role   = aws_iam_role.test.arn
rule {
filter {}
status = "Enabled"
delete_marker_replication {
status = "Disabled"
}
destination {
bucket = aws_s3_bucket.destination.arn
}
}
}`)
}

func testAccBucketReplicationConfigurationRTC(rName string) string {
return acctest.ConfigCompose(
testAccBucketReplicationConfigurationBase(rName),
Expand Down
Loading

0 comments on commit 0896842

Please sign in to comment.