From 3b4e69f6e9f3a4ca1acd98c4c6c390b4235c97b2 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Thu, 7 Nov 2024 11:02:47 -0800 Subject: [PATCH 01/35] Adds service S3 Tables --- .ci/.semgrep-service-name0.yml | 15 + .ci/.semgrep-service-name1.yml | 47 +- .ci/.semgrep-service-name2.yml | 79 ++- .ci/.semgrep-service-name3.yml | 108 ++-- .github/labeler-issue-triage.yml | 2 + .github/labeler-pr-triage.yml | 6 + .../components/generated/services_all.kt | 1 + go.mod | 1 + go.sum | 2 + infrastructure/repository/labels-service.tf | 1 + internal/conns/awsclient_gen.go | 5 + internal/provider/fwprovider/provider_gen.go | 7 + internal/provider/provider_gen.go | 8 + internal/provider/service_packages_gen.go | 2 + internal/service/s3tables/generate.go | 7 + .../s3tables/service_endpoint_resolver_gen.go | 82 +++ .../s3tables/service_endpoints_gen_test.go | 600 ++++++++++++++++++ .../service/s3tables/service_package_gen.go | 49 ++ internal/sweep/service_packages_gen_test.go | 2 + names/consts_gen.go | 2 + names/data/names_data.hcl | 22 + website/allowed-subcategories.txt | 1 + .../custom-service-endpoints.html.markdown | 1 + 23 files changed, 956 insertions(+), 94 deletions(-) create mode 100644 internal/service/s3tables/generate.go create mode 100644 internal/service/s3tables/service_endpoint_resolver_gen.go create mode 100644 internal/service/s3tables/service_endpoints_gen_test.go create mode 100644 internal/service/s3tables/service_package_gen.go diff --git a/.ci/.semgrep-service-name0.yml b/.ci/.semgrep-service-name0.yml index 754ea6c3eb91..1c424d5c199e 100644 --- a/.ci/.semgrep-service-name0.yml +++ b/.ci/.semgrep-service-name0.yml @@ -4315,3 +4315,18 @@ rules: - focus-metavariable: $NAME - pattern-not: func $NAME($T *testing.T) severity: WARNING + - id: connect-in-test-name + languages: + - go + message: Include "Connect" in test name + paths: + include: + - internal/service/connect/*_test.go + patterns: + - pattern: func $NAME( ... ) + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-not-regex: "^TestAccConnect" + - pattern-regex: ^TestAcc.* + severity: WARNING diff --git a/.ci/.semgrep-service-name1.yml b/.ci/.semgrep-service-name1.yml index bd6eba723acc..ba39d86cc217 100644 --- a/.ci/.semgrep-service-name1.yml +++ b/.ci/.semgrep-service-name1.yml @@ -1,20 +1,5 @@ # Generated by internal/generate/servicesemgrep/main.go; DO NOT EDIT. rules: - - id: connect-in-test-name - languages: - - go - message: Include "Connect" in test name - paths: - include: - - internal/service/connect/*_test.go - patterns: - - pattern: func $NAME( ... ) - - metavariable-pattern: - metavariable: $NAME - patterns: - - pattern-not-regex: "^TestAccConnect" - - pattern-regex: ^TestAcc.* - severity: WARNING - id: connect-in-const-name languages: - go @@ -4305,3 +4290,35 @@ rules: patterns: - pattern-regex: "(?i)IoTAnalytics" severity: WARNING + - id: iotanalytics-in-var-name + languages: + - go + message: Do not use "IoTAnalytics" in var name inside iotanalytics package + paths: + include: + - internal/service/iotanalytics + patterns: + - pattern: var $NAME = ... + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)IoTAnalytics" + severity: WARNING + - id: iotevents-in-func-name + languages: + - go + message: Do not use "IoTEvents" in func name inside iotevents package + paths: + include: + - internal/service/iotevents + exclude: + - internal/service/iotevents/list_pages_gen.go + patterns: + - pattern: func $NAME( ... 
) + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)IoTEvents" + - focus-metavariable: $NAME + - pattern-not: func $NAME($T *testing.T) + severity: WARNING diff --git a/.ci/.semgrep-service-name2.yml b/.ci/.semgrep-service-name2.yml index 566413e357f0..86a2dba99660 100644 --- a/.ci/.semgrep-service-name2.yml +++ b/.ci/.semgrep-service-name2.yml @@ -1,37 +1,5 @@ # Generated by internal/generate/servicesemgrep/main.go; DO NOT EDIT. rules: - - id: iotanalytics-in-var-name - languages: - - go - message: Do not use "IoTAnalytics" in var name inside iotanalytics package - paths: - include: - - internal/service/iotanalytics - patterns: - - pattern: var $NAME = ... - - metavariable-pattern: - metavariable: $NAME - patterns: - - pattern-regex: "(?i)IoTAnalytics" - severity: WARNING - - id: iotevents-in-func-name - languages: - - go - message: Do not use "IoTEvents" in func name inside iotevents package - paths: - include: - - internal/service/iotevents - exclude: - - internal/service/iotevents/list_pages_gen.go - patterns: - - pattern: func $NAME( ... ) - - metavariable-pattern: - metavariable: $NAME - patterns: - - pattern-regex: "(?i)IoTEvents" - - focus-metavariable: $NAME - - pattern-not: func $NAME($T *testing.T) - severity: WARNING - id: iotevents-in-test-name languages: - go @@ -4308,3 +4276,50 @@ rules: patterns: - pattern-regex: "(?i)Redshift" severity: WARNING + - id: redshiftdata-in-func-name + languages: + - go + message: Do not use "RedshiftData" in func name inside redshiftdata package + paths: + include: + - internal/service/redshiftdata + exclude: + - internal/service/redshiftdata/list_pages_gen.go + patterns: + - pattern: func $NAME( ... ) + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)RedshiftData" + - focus-metavariable: $NAME + - pattern-not: func $NAME($T *testing.T) + severity: WARNING + - id: redshiftdata-in-test-name + languages: + - go + message: Include "RedshiftData" in test name + paths: + include: + - internal/service/redshiftdata/*_test.go + patterns: + - pattern: func $NAME( ... ) + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-not-regex: "^TestAccRedshiftData" + - pattern-regex: ^TestAcc.* + severity: WARNING + - id: redshiftdata-in-const-name + languages: + - go + message: Do not use "RedshiftData" in const name inside redshiftdata package + paths: + include: + - internal/service/redshiftdata + patterns: + - pattern: const $NAME = ... + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)RedshiftData" + severity: WARNING diff --git a/.ci/.semgrep-service-name3.yml b/.ci/.semgrep-service-name3.yml index a8b2be25f36b..36bfe8140900 100644 --- a/.ci/.semgrep-service-name3.yml +++ b/.ci/.semgrep-service-name3.yml @@ -1,52 +1,5 @@ # Generated by internal/generate/servicesemgrep/main.go; DO NOT EDIT. rules: - - id: redshiftdata-in-func-name - languages: - - go - message: Do not use "RedshiftData" in func name inside redshiftdata package - paths: - include: - - internal/service/redshiftdata - exclude: - - internal/service/redshiftdata/list_pages_gen.go - patterns: - - pattern: func $NAME( ... 
) - - metavariable-pattern: - metavariable: $NAME - patterns: - - pattern-regex: "(?i)RedshiftData" - - focus-metavariable: $NAME - - pattern-not: func $NAME($T *testing.T) - severity: WARNING - - id: redshiftdata-in-test-name - languages: - - go - message: Include "RedshiftData" in test name - paths: - include: - - internal/service/redshiftdata/*_test.go - patterns: - - pattern: func $NAME( ... ) - - metavariable-pattern: - metavariable: $NAME - patterns: - - pattern-not-regex: "^TestAccRedshiftData" - - pattern-regex: ^TestAcc.* - severity: WARNING - - id: redshiftdata-in-const-name - languages: - - go - message: Do not use "RedshiftData" in const name inside redshiftdata package - paths: - include: - - internal/service/redshiftdata - patterns: - - pattern: const $NAME = ... - - metavariable-pattern: - metavariable: $NAME - patterns: - - pattern-regex: "(?i)RedshiftData" - severity: WARNING - id: redshiftdata-in-var-name languages: - go @@ -1236,6 +1189,67 @@ rules: patterns: - pattern-regex: "(?i)S3Outposts" severity: WARNING + - id: s3tables-in-func-name + languages: + - go + message: Do not use "S3Tables" in func name inside s3tables package + paths: + include: + - internal/service/s3tables + exclude: + - internal/service/s3tables/list_pages_gen.go + patterns: + - pattern: func $NAME( ... ) + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)S3Tables" + - focus-metavariable: $NAME + - pattern-not: func $NAME($T *testing.T) + severity: WARNING + - id: s3tables-in-test-name + languages: + - go + message: Include "S3Tables" in test name + paths: + include: + - internal/service/s3tables/*_test.go + patterns: + - pattern: func $NAME( ... ) + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-not-regex: "^TestAccS3Tables" + - pattern-regex: ^TestAcc.* + severity: WARNING + - id: s3tables-in-const-name + languages: + - go + message: Do not use "S3Tables" in const name inside s3tables package + paths: + include: + - internal/service/s3tables + patterns: + - pattern: const $NAME = ... + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)S3Tables" + severity: WARNING + - id: s3tables-in-var-name + languages: + - go + message: Do not use "S3Tables" in var name inside s3tables package + paths: + include: + - internal/service/s3tables + patterns: + - pattern: var $NAME = ... 
+ - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)S3Tables" + severity: WARNING - id: sagemaker-in-func-name languages: - go diff --git a/.github/labeler-issue-triage.yml b/.github/labeler-issue-triage.yml index fac4523bf0b7..96b42a88d4d2 100644 --- a/.github/labeler-issue-triage.yml +++ b/.github/labeler-issue-triage.yml @@ -599,6 +599,8 @@ service/s3control: - '((\*|-)\s*`?|(data|resource)\s+"?)aws_(s3_account_|s3control_|s3_access_)' service/s3outposts: - '((\*|-)\s*`?|(data|resource)\s+"?)aws_s3outposts_' +service/s3tables: + - '((\*|-)\s*`?|(data|resource)\s+"?)aws_s3tables_' service/sagemaker: - '((\*|-)\s*`?|(data|resource)\s+"?)aws_sagemaker_' service/sagemakera2iruntime: diff --git a/.github/labeler-pr-triage.yml b/.github/labeler-pr-triage.yml index 04b40b53617b..28d5a01a83d8 100644 --- a/.github/labeler-pr-triage.yml +++ b/.github/labeler-pr-triage.yml @@ -1890,6 +1890,12 @@ service/s3outposts: - any-glob-to-any-file: - 'internal/service/s3outposts/**/*' - 'website/**/s3outposts_*' +service/s3tables: + - any: + - changed-files: + - any-glob-to-any-file: + - 'internal/service/s3tables/**/*' + - 'website/**/s3tables_*' service/sagemaker: - any: - changed-files: diff --git a/.teamcity/components/generated/services_all.kt b/.teamcity/components/generated/services_all.kt index 4e4dd545903f..d7ef593aa612 100644 --- a/.teamcity/components/generated/services_all.kt +++ b/.teamcity/components/generated/services_all.kt @@ -201,6 +201,7 @@ val services = mapOf( "s3" to ServiceSpec("S3 (Simple Storage)"), "s3control" to ServiceSpec("S3 Control"), "s3outposts" to ServiceSpec("S3 on Outposts"), + "s3tables" to ServiceSpec("S3 Tables"), "sagemaker" to ServiceSpec("SageMaker", vpcLock = true), "scheduler" to ServiceSpec("EventBridge Scheduler"), "schemas" to ServiceSpec("EventBridge Schemas"), diff --git a/go.mod b/go.mod index 0f41b70c35a8..c609033e5394 100644 --- a/go.mod +++ b/go.mod @@ -325,6 +325,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.6 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.6 // indirect + github.com/aws/aws-sdk-go-v2/service/s3tables v1.0.0 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 // indirect github.com/bgentry/speakeasy v0.2.0 // indirect github.com/boombuler/barcode v1.0.1 // indirect diff --git a/go.sum b/go.sum index 269234b837cb..d0ec3fe01ebf 100644 --- a/go.sum +++ b/go.sum @@ -453,6 +453,8 @@ github.com/aws/aws-sdk-go-v2/service/s3control v1.52.0 h1:tH6HJdKj1O5N8Uti8D2X20 github.com/aws/aws-sdk-go-v2/service/s3control v1.52.0/go.mod h1:sAOVMYapLSs3nCfdQo63qfVkKHlu97oqHDPrRbqayNg= github.com/aws/aws-sdk-go-v2/service/s3outposts v1.28.7 h1:yUuN4xIlI/2bUqniq5OdIw13FIGtUdPhzU4dzl2v6fM= github.com/aws/aws-sdk-go-v2/service/s3outposts v1.28.7/go.mod h1:yCIumXPHLHsjmrD8P9UvXFVT0R9R+Wlqut71bW5+ZY4= +github.com/aws/aws-sdk-go-v2/service/s3tables v1.0.0 h1:akXaBXvSIT3ca7Ojnc1TX+2pTK6lhyodZTYTrdUD6Vc= +github.com/aws/aws-sdk-go-v2/service/s3tables v1.0.0/go.mod h1:X85zeZUOEsqLnH/CShIydM9ANVMwXHL1A/pvTMSQw6U= github.com/aws/aws-sdk-go-v2/service/sagemaker v1.168.1 h1:Sz0HMK2/8itHAb9ABnMOEHfpOAIxk2a+f6EMsw7jn54= github.com/aws/aws-sdk-go-v2/service/sagemaker v1.168.1/go.mod h1:LoIh7abCP1rQng1kxJVJOTux55TaYN2tVN7G+zNbhus= github.com/aws/aws-sdk-go-v2/service/scheduler v1.12.7 h1:lRA+BvESWVoldCxaw3SG9UssITkVref8rlVy5xCsh0A= diff --git a/infrastructure/repository/labels-service.tf 
b/infrastructure/repository/labels-service.tf index 9473e9a0c6d0..70785563f5b1 100644 --- a/infrastructure/repository/labels-service.tf +++ b/infrastructure/repository/labels-service.tf @@ -286,6 +286,7 @@ variable "service_labels" { "s3", "s3control", "s3outposts", + "s3tables", "sagemaker", "sagemakera2iruntime", "sagemakeredge", diff --git a/internal/conns/awsclient_gen.go b/internal/conns/awsclient_gen.go index 146a8982839f..1864dff48cd3 100644 --- a/internal/conns/awsclient_gen.go +++ b/internal/conns/awsclient_gen.go @@ -204,6 +204,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3control" "github.com/aws/aws-sdk-go-v2/service/s3outposts" + "github.com/aws/aws-sdk-go-v2/service/s3tables" "github.com/aws/aws-sdk-go-v2/service/sagemaker" "github.com/aws/aws-sdk-go-v2/service/scheduler" "github.com/aws/aws-sdk-go-v2/service/schemas" @@ -1052,6 +1053,10 @@ func (c *AWSClient) S3OutpostsClient(ctx context.Context) *s3outposts.Client { return errs.Must(client[*s3outposts.Client](ctx, c, names.S3Outposts, make(map[string]any))) } +func (c *AWSClient) S3TablesClient(ctx context.Context) *s3tables.Client { + return errs.Must(client[*s3tables.Client](ctx, c, names.S3Tables, make(map[string]any))) +} + func (c *AWSClient) SESClient(ctx context.Context) *ses.Client { return errs.Must(client[*ses.Client](ctx, c, names.SES, make(map[string]any))) } diff --git a/internal/provider/fwprovider/provider_gen.go b/internal/provider/fwprovider/provider_gen.go index ddbd248dd9c2..7ac975cc8e0f 100644 --- a/internal/provider/fwprovider/provider_gen.go +++ b/internal/provider/fwprovider/provider_gen.go @@ -1620,6 +1620,13 @@ func endpointsBlock() schema.SetNestedBlock { Description: "Use this to override the default service endpoint URL", }, + // s3tables + + "s3tables": schema.StringAttribute{ + Optional: true, + Description: "Use this to override the default service endpoint URL", + }, + // sagemaker "sagemaker": schema.StringAttribute{ diff --git a/internal/provider/provider_gen.go b/internal/provider/provider_gen.go index aaf869c168f5..a7ffa17d8b52 100644 --- a/internal/provider/provider_gen.go +++ b/internal/provider/provider_gen.go @@ -1871,6 +1871,14 @@ func endpointsSchema() *schema.Schema { Description: "Use this to override the default service endpoint URL", }, + // s3tables + + "s3tables": { + Type: schema.TypeString, + Optional: true, + Description: "Use this to override the default service endpoint URL", + }, + // sagemaker "sagemaker": { diff --git a/internal/provider/service_packages_gen.go b/internal/provider/service_packages_gen.go index 78084c4e207c..cae6f19a26c9 100644 --- a/internal/provider/service_packages_gen.go +++ b/internal/provider/service_packages_gen.go @@ -208,6 +208,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/service/s3" "github.com/hashicorp/terraform-provider-aws/internal/service/s3control" "github.com/hashicorp/terraform-provider-aws/internal/service/s3outposts" + "github.com/hashicorp/terraform-provider-aws/internal/service/s3tables" "github.com/hashicorp/terraform-provider-aws/internal/service/sagemaker" "github.com/hashicorp/terraform-provider-aws/internal/service/scheduler" "github.com/hashicorp/terraform-provider-aws/internal/service/schemas" @@ -458,6 +459,7 @@ func servicePackages(ctx context.Context) []conns.ServicePackage { s3.ServicePackage(ctx), s3control.ServicePackage(ctx), s3outposts.ServicePackage(ctx), + s3tables.ServicePackage(ctx), sagemaker.ServicePackage(ctx), 
scheduler.ServicePackage(ctx), schemas.ServicePackage(ctx), diff --git a/internal/service/s3tables/generate.go b/internal/service/s3tables/generate.go new file mode 100644 index 000000000000..998536c74211 --- /dev/null +++ b/internal/service/s3tables/generate.go @@ -0,0 +1,7 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:generate go run ../../generate/servicepackage/main.go +// ONLY generate directives and package declaration! Do not add anything else to this file. + +package s3tables diff --git a/internal/service/s3tables/service_endpoint_resolver_gen.go b/internal/service/s3tables/service_endpoint_resolver_gen.go new file mode 100644 index 000000000000..5e03ccf49e00 --- /dev/null +++ b/internal/service/s3tables/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package s3tables + +import ( + "context" + "fmt" + "net" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3tables" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ s3tables.EndpointResolverV2 = resolverV2{} + +type resolverV2 struct { + defaultResolver s3tables.EndpointResolverV2 +} + +func newEndpointResolverV2() resolverV2 { + return resolverV2{ + defaultResolver: s3tables.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverV2) ResolveEndpoint(ctx context.Context, params s3tables.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws.Bool(false) + } else { + err = fmt.Errorf("looking up s3tables endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*s3tables.Options) { + return func(o *s3tables.Options) { + if endpoint != "" { + o.BaseEndpoint = aws.String(endpoint) + } + } +} diff --git a/internal/service/s3tables/service_endpoints_gen_test.go b/internal/service/s3tables/service_endpoints_gen_test.go new file mode 100644 index 000000000000..4592ba4ea5b4 --- /dev/null +++ b/internal/service/s3tables/service_endpoints_gen_test.go @@ -0,0 +1,600 @@ +// Code generated by internal/generate/serviceendpointtests/main.go; DO NOT EDIT. 
+ +package s3tables_test + +import ( + "context" + "errors" + "fmt" + "maps" + "net" + "net/url" + "os" + "path/filepath" + "reflect" + "strings" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/s3tables" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + terraformsdk "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/provider" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type endpointTestCase struct { + with []setupFunc + expected caseExpectations +} + +type caseSetup struct { + config map[string]any + configFile configFile + environmentVariables map[string]string +} + +type configFile struct { + baseUrl string + serviceUrl string +} + +type caseExpectations struct { + diags diag.Diagnostics + endpoint string + region string +} + +type apiCallParams struct { + endpoint string + region string +} + +type setupFunc func(setup *caseSetup) + +type callFunc func(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams + +const ( + packageNameConfigEndpoint = "https://packagename-config.endpoint.test/" + awsServiceEnvvarEndpoint = "https://service-envvar.endpoint.test/" + baseEnvvarEndpoint = "https://base-envvar.endpoint.test/" + serviceConfigFileEndpoint = "https://service-configfile.endpoint.test/" + baseConfigFileEndpoint = "https://base-configfile.endpoint.test/" +) + +const ( + packageName = "s3tables" + awsEnvVar = "AWS_ENDPOINT_URL_S3TABLES" + baseEnvVar = "AWS_ENDPOINT_URL" + configParam = "s3tables" +) + +const ( + expectedCallRegion = "us-west-2" //lintignore:AWSAT003 +) + +func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.Setenv + const providerRegion = "us-west-2" //lintignore:AWSAT003 + const expectedEndpointRegion = providerRegion + + testcases := map[string]endpointTestCase{ + "no config": { + with: []setupFunc{withNoConfig}, + expected: expectDefaultEndpoint(t, expectedEndpointRegion), + }, + + // Package name endpoint on Config + + "package name endpoint config": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + "package name endpoint config overrides aws service envvar": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withAwsEnvVar, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + "package name endpoint config overrides base envvar": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withBaseEnvVar, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + "package name endpoint config overrides service config file": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withServiceEndpointInConfigFile, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + "package name endpoint config overrides base config file": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withBaseEndpointInConfigFile, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + // Service endpoint in AWS envvar + + "service aws envvar": { + with: []setupFunc{ + withAwsEnvVar, + 
}, + expected: expectAwsEnvVarEndpoint(), + }, + + "service aws envvar overrides base envvar": { + with: []setupFunc{ + withAwsEnvVar, + withBaseEnvVar, + }, + expected: expectAwsEnvVarEndpoint(), + }, + + "service aws envvar overrides service config file": { + with: []setupFunc{ + withAwsEnvVar, + withServiceEndpointInConfigFile, + }, + expected: expectAwsEnvVarEndpoint(), + }, + + "service aws envvar overrides base config file": { + with: []setupFunc{ + withAwsEnvVar, + withBaseEndpointInConfigFile, + }, + expected: expectAwsEnvVarEndpoint(), + }, + + // Base endpoint in envvar + + "base endpoint envvar": { + with: []setupFunc{ + withBaseEnvVar, + }, + expected: expectBaseEnvVarEndpoint(), + }, + + "base endpoint envvar overrides service config file": { + with: []setupFunc{ + withBaseEnvVar, + withServiceEndpointInConfigFile, + }, + expected: expectBaseEnvVarEndpoint(), + }, + + "base endpoint envvar overrides base config file": { + with: []setupFunc{ + withBaseEnvVar, + withBaseEndpointInConfigFile, + }, + expected: expectBaseEnvVarEndpoint(), + }, + + // Service endpoint in config file + + "service config file": { + with: []setupFunc{ + withServiceEndpointInConfigFile, + }, + expected: expectServiceConfigFileEndpoint(), + }, + + "service config file overrides base config file": { + with: []setupFunc{ + withServiceEndpointInConfigFile, + withBaseEndpointInConfigFile, + }, + expected: expectServiceConfigFileEndpoint(), + }, + + // Base endpoint in config file + + "base endpoint config file": { + with: []setupFunc{ + withBaseEndpointInConfigFile, + }, + expected: expectBaseConfigFileEndpoint(), + }, + + // Use FIPS endpoint on Config + + "use fips config": { + with: []setupFunc{ + withUseFIPSInConfig, + }, + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), + }, + + "use fips config with package name endpoint config": { + with: []setupFunc{ + withUseFIPSInConfig, + withPackageNameEndpointInConfig, + }, + expected: expectPackageNameConfigEndpoint(), + }, + } + + for name, testcase := range testcases { //nolint:paralleltest // uses t.Setenv + t.Run(name, func(t *testing.T) { + testEndpointCase(t, providerRegion, testcase, callService) + }) + } +} + +func defaultEndpoint(region string) (url.URL, error) { + r := s3tables.NewDefaultEndpointResolverV2() + + ep, err := r.ResolveEndpoint(context.Background(), s3tables.EndpointParameters{ + Region: aws.String(region), + }) + if err != nil { + return url.URL{}, err + } + + if ep.URI.Path == "" { + ep.URI.Path = "/" + } + + return ep.URI, nil +} + +func defaultFIPSEndpoint(region string) (url.URL, error) { + r := s3tables.NewDefaultEndpointResolverV2() + + ep, err := r.ResolveEndpoint(context.Background(), s3tables.EndpointParameters{ + Region: aws.String(region), + UseFIPS: aws.Bool(true), + }) + if err != nil { + return url.URL{}, err + } + + if ep.URI.Path == "" { + ep.URI.Path = "/" + } + + return ep.URI, nil +} + +func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { + t.Helper() + + client := meta.S3TablesClient(ctx) + + var result apiCallParams + + _, err := client.ListTableBuckets(ctx, &s3tables.ListTableBucketsInput{}, + func(opts *s3tables.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &result.endpoint), + addRetrieveRegionMiddleware(&result.region), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) + } 
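+	// The request is aborted by the cancel middleware, so only the endpoint and region captured by the other test middleware are returned.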
+ + return result +} + +func withNoConfig(_ *caseSetup) { + // no-op +} + +func withPackageNameEndpointInConfig(setup *caseSetup) { + if _, ok := setup.config[names.AttrEndpoints]; !ok { + setup.config[names.AttrEndpoints] = []any{ + map[string]any{}, + } + } + endpoints := setup.config[names.AttrEndpoints].([]any)[0].(map[string]any) + endpoints[packageName] = packageNameConfigEndpoint +} + +func withAwsEnvVar(setup *caseSetup) { + setup.environmentVariables[awsEnvVar] = awsServiceEnvvarEndpoint +} + +func withBaseEnvVar(setup *caseSetup) { + setup.environmentVariables[baseEnvVar] = baseEnvvarEndpoint +} + +func withServiceEndpointInConfigFile(setup *caseSetup) { + setup.configFile.serviceUrl = serviceConfigFileEndpoint +} + +func withBaseEndpointInConfigFile(setup *caseSetup) { + setup.configFile.baseUrl = baseConfigFileEndpoint +} + +func withUseFIPSInConfig(setup *caseSetup) { + setup.config["use_fips_endpoint"] = true +} + +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + + return caseExpectations{ + endpoint: endpoint.String(), + region: expectedCallRegion, + } +} + +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + + return caseExpectations{ + endpoint: endpoint.String(), + region: expectedCallRegion, + } +} + +func expectPackageNameConfigEndpoint() caseExpectations { + return caseExpectations{ + endpoint: packageNameConfigEndpoint, + region: expectedCallRegion, + } +} + +func expectAwsEnvVarEndpoint() caseExpectations { + return caseExpectations{ + endpoint: awsServiceEnvvarEndpoint, + region: expectedCallRegion, + } +} + +func expectBaseEnvVarEndpoint() caseExpectations { + return caseExpectations{ + endpoint: baseEnvvarEndpoint, + region: expectedCallRegion, + } +} + +func expectServiceConfigFileEndpoint() caseExpectations { + return caseExpectations{ + endpoint: serviceConfigFileEndpoint, + region: expectedCallRegion, + } +} + +func expectBaseConfigFileEndpoint() caseExpectations { + return caseExpectations{ + endpoint: baseConfigFileEndpoint, + region: expectedCallRegion, + } +} + +func testEndpointCase(t *testing.T, region string, testcase endpointTestCase, callF callFunc) { + t.Helper() + + ctx := context.Background() + + setup := caseSetup{ + config: map[string]any{}, + environmentVariables: map[string]string{}, + } + + for _, f := range testcase.with { + f(&setup) + } + + config := map[string]any{ + names.AttrAccessKey: servicemocks.MockStaticAccessKey, + names.AttrSecretKey: servicemocks.MockStaticSecretKey, + names.AttrRegion: region, + names.AttrSkipCredentialsValidation: true, + names.AttrSkipRequestingAccountID: true, + } + + maps.Copy(config, setup.config) + + if setup.configFile.baseUrl != "" || setup.configFile.serviceUrl != "" { + config[names.AttrProfile] = "default" + tempDir := t.TempDir() + writeSharedConfigFile(t, &config, tempDir, generateSharedConfigFile(setup.configFile)) + } + + for k, v := range setup.environmentVariables { + 
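+		// t.Setenv restores each variable when the test finishes and is the reason these cases cannot run in parallel.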
t.Setenv(k, v) + } + + p, err := provider.New(ctx) + if err != nil { + t.Fatal(err) + } + + expectedDiags := testcase.expected.diags + diags := p.Configure(ctx, terraformsdk.NewResourceConfigRaw(config)) + + if diff := cmp.Diff(diags, expectedDiags, cmp.Comparer(sdkdiag.Comparer)); diff != "" { + t.Errorf("unexpected diagnostics difference: %s", diff) + } + + if diags.HasError() { + return + } + + meta := p.Meta().(*conns.AWSClient) + + callParams := callF(ctx, t, meta) + + if e, a := testcase.expected.endpoint, callParams.endpoint; e != a { + t.Errorf("expected endpoint %q, got %q", e, a) + } + + if e, a := testcase.expected.region, callParams.region; e != a { + t.Errorf("expected region %q, got %q", e, a) + } +} + +func addRetrieveEndpointURLMiddleware(t *testing.T, endpoint *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + retrieveEndpointURLMiddleware(t, endpoint), + middleware.After, + ) + } +} + +func retrieveEndpointURLMiddleware(t *testing.T, endpoint *string) middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Retrieve Endpoint", + func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + t.Helper() + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + t.Fatalf("Expected *github.com/aws/smithy-go/transport/http.Request, got %s", fullTypeName(in.Request)) + } + + url := request.URL + url.RawQuery = "" + url.Path = "/" + + *endpoint = url.String() + + return next.HandleFinalize(ctx, in) + }) +} + +func addRetrieveRegionMiddleware(region *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Serialize.Add( + retrieveRegionMiddleware(region), + middleware.After, + ) + } +} + +func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { + return middleware.SerializeMiddlewareFunc( + "Test: Retrieve Region", + func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (middleware.SerializeOutput, middleware.Metadata, error) { + *region = awsmiddleware.GetRegion(ctx) + + return next.HandleSerialize(ctx, in) + }, + ) +} + +var errCancelOperation = fmt.Errorf("Test: Canceling request") + +func addCancelRequestMiddleware() func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + cancelRequestMiddleware(), + middleware.After, + ) + } +} + +// cancelRequestMiddleware creates a Smithy middleware that intercepts the request before sending and cancels it +func cancelRequestMiddleware() middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Cancel Requests", + func(_ context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + return middleware.FinalizeOutput{}, middleware.Metadata{}, errCancelOperation + }) +} + +func fullTypeName(i interface{}) string { + return fullValueTypeName(reflect.ValueOf(i)) +} + +func fullValueTypeName(v reflect.Value) string { + if v.Kind() == reflect.Ptr { + return "*" + fullValueTypeName(reflect.Indirect(v)) + } + + requestType := v.Type() + return fmt.Sprintf("%s.%s", requestType.PkgPath(), requestType.Name()) +} + +func generateSharedConfigFile(config configFile) string { + var buf strings.Builder + + buf.WriteString(` +[default] +aws_access_key_id = DefaultSharedCredentialsAccessKey +aws_secret_access_key = 
DefaultSharedCredentialsSecretKey +`) + if config.baseUrl != "" { + buf.WriteString(fmt.Sprintf("endpoint_url = %s\n", config.baseUrl)) + } + + if config.serviceUrl != "" { + buf.WriteString(fmt.Sprintf(` +services = endpoint-test + +[services endpoint-test] +%[1]s = + endpoint_url = %[2]s +`, configParam, serviceConfigFileEndpoint)) + } + + return buf.String() +} + +func writeSharedConfigFile(t *testing.T, config *map[string]any, tempDir, content string) string { + t.Helper() + + file, err := os.Create(filepath.Join(tempDir, "aws-sdk-go-base-shared-configuration-file")) + if err != nil { + t.Fatalf("creating shared configuration file: %s", err) + } + + _, err = file.WriteString(content) + if err != nil { + t.Fatalf(" writing shared configuration file: %s", err) + } + + if v, ok := (*config)[names.AttrSharedConfigFiles]; !ok { + (*config)[names.AttrSharedConfigFiles] = []any{file.Name()} + } else { + (*config)[names.AttrSharedConfigFiles] = append(v.([]any), file.Name()) + } + + return file.Name() +} diff --git a/internal/service/s3tables/service_package_gen.go b/internal/service/s3tables/service_package_gen.go new file mode 100644 index 000000000000..2462828f3461 --- /dev/null +++ b/internal/service/s3tables/service_package_gen.go @@ -0,0 +1,49 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package s3tables + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3tables" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type servicePackage struct{} + +func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*types.ServicePackageFrameworkDataSource { + return []*types.ServicePackageFrameworkDataSource{} +} + +func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.ServicePackageFrameworkResource { + return []*types.ServicePackageFrameworkResource{} +} + +func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePackageSDKDataSource { + return []*types.ServicePackageSDKDataSource{} +} + +func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePackageSDKResource { + return []*types.ServicePackageSDKResource{} +} + +func (p *servicePackage) ServicePackageName() string { + return names.S3Tables +} + +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. 
+func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*s3tables.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws.Config)) + + return s3tables.NewFromConfig(cfg, + s3tables.WithEndpointResolverV2(newEndpointResolverV2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil +} + +func ServicePackage(ctx context.Context) conns.ServicePackage { + return &servicePackage{} +} diff --git a/internal/sweep/service_packages_gen_test.go b/internal/sweep/service_packages_gen_test.go index 4e11fb03bff2..bb288e0459dd 100644 --- a/internal/sweep/service_packages_gen_test.go +++ b/internal/sweep/service_packages_gen_test.go @@ -208,6 +208,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/service/s3" "github.com/hashicorp/terraform-provider-aws/internal/service/s3control" "github.com/hashicorp/terraform-provider-aws/internal/service/s3outposts" + "github.com/hashicorp/terraform-provider-aws/internal/service/s3tables" "github.com/hashicorp/terraform-provider-aws/internal/service/sagemaker" "github.com/hashicorp/terraform-provider-aws/internal/service/scheduler" "github.com/hashicorp/terraform-provider-aws/internal/service/schemas" @@ -458,6 +459,7 @@ func servicePackages(ctx context.Context) []conns.ServicePackage { s3.ServicePackage(ctx), s3control.ServicePackage(ctx), s3outposts.ServicePackage(ctx), + s3tables.ServicePackage(ctx), sagemaker.ServicePackage(ctx), scheduler.ServicePackage(ctx), schemas.ServicePackage(ctx), diff --git a/names/consts_gen.go b/names/consts_gen.go index d6bb0032a28e..2ff235a3f3e3 100644 --- a/names/consts_gen.go +++ b/names/consts_gen.go @@ -202,6 +202,7 @@ const ( S3 = "s3" S3Control = "s3control" S3Outposts = "s3outposts" + S3Tables = "s3tables" SES = "ses" SESV2 = "sesv2" SFN = "sfn" @@ -452,6 +453,7 @@ const ( S3ServiceID = "S3" S3ControlServiceID = "S3 Control" S3OutpostsServiceID = "S3Outposts" + S3TablesServiceID = "S3Tables" SESServiceID = "SES" SESV2ServiceID = "SESv2" SFNServiceID = "SFN" diff --git a/names/data/names_data.hcl b/names/data/names_data.hcl index 43a89b771964..d51897bf5a0d 100644 --- a/names/data/names_data.hcl +++ b/names/data/names_data.hcl @@ -7482,6 +7482,28 @@ service "s3control" { brand = "AWS" } +service "s3tables" { + sdk { + id = "S3Tables" + } + + names { + provider_name_upper = "S3Tables" + human_friendly = "S3 Tables" + } + + endpoint_info { + endpoint_api_call = "ListTableBuckets" + } + + resource_prefix { + correct = "aws_s3tables_" + } + + doc_prefix = ["s3tables_"] + brand = "Amazon" +} + service "glacier" { sdk { id = "Glacier" diff --git a/website/allowed-subcategories.txt b/website/allowed-subcategories.txt index 6a9803a3080f..ebac5cea5700 100644 --- a/website/allowed-subcategories.txt +++ b/website/allowed-subcategories.txt @@ -203,6 +203,7 @@ Route 53 Resolver S3 (Simple Storage) S3 Control S3 Glacier +S3 Tables S3 on Outposts SDB (SimpleDB) SES (Simple Email) diff --git a/website/docs/guides/custom-service-endpoints.html.markdown b/website/docs/guides/custom-service-endpoints.html.markdown index 3e0ca3b8c5e9..88ea867bb49a 100644 --- a/website/docs/guides/custom-service-endpoints.html.markdown +++ b/website/docs/guides/custom-service-endpoints.html.markdown @@ -283,6 +283,7 @@ provider "aws" { |S3 (Simple Storage)|`s3`(or `s3api`)|`AWS_ENDPOINT_URL_S3`|`s3`| |S3 Control|`s3control`|`AWS_ENDPOINT_URL_S3_CONTROL`|`s3_control`| |S3 on Outposts|`s3outposts`|`AWS_ENDPOINT_URL_S3OUTPOSTS`|`s3outposts`| +|S3 Tables|`s3tables`|`AWS_ENDPOINT_URL_S3TABLES`|`s3tables`| 
|SageMaker|`sagemaker`|`AWS_ENDPOINT_URL_SAGEMAKER`|`sagemaker`| |EventBridge Scheduler|`scheduler`|`AWS_ENDPOINT_URL_SCHEDULER`|`scheduler`| |EventBridge Schemas|`schemas`|`AWS_ENDPOINT_URL_SCHEMAS`|`schemas`| From 891b920572a47c50b469d4393fd8e24f3869b76e Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Thu, 7 Nov 2024 14:13:51 -0800 Subject: [PATCH 02/35] Adds `PrefixNoneOf` and `SuffixNoneOf` framework validators --- .../framework/validators/prefix_none_of.go | 61 +++++++++ .../validators/prefix_none_of_test.go | 121 ++++++++++++++++++ .../framework/validators/suffix_none_of.go | 61 +++++++++ .../validators/suffix_none_of_test.go | 116 +++++++++++++++++ 4 files changed, 359 insertions(+) create mode 100644 internal/framework/validators/prefix_none_of.go create mode 100644 internal/framework/validators/prefix_none_of_test.go create mode 100644 internal/framework/validators/suffix_none_of.go create mode 100644 internal/framework/validators/suffix_none_of_test.go diff --git a/internal/framework/validators/prefix_none_of.go b/internal/framework/validators/prefix_none_of.go new file mode 100644 index 000000000000..f5a03bd4ddb3 --- /dev/null +++ b/internal/framework/validators/prefix_none_of.go @@ -0,0 +1,61 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package validators + +import ( + "context" + "fmt" + "slices" + "strings" + + "github.com/hashicorp/terraform-plugin-framework-validators/helpers/validatordiag" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" +) + +var _ validator.String = prefixNoneOfValidator{} + +type prefixNoneOfValidator struct { + values []string +} + +func (v prefixNoneOfValidator) Description(ctx context.Context) string { + return v.MarkdownDescription(ctx) +} + +func (v prefixNoneOfValidator) MarkdownDescription(_ context.Context) string { + return fmt.Sprintf("value must begin with none of: %s", tfslices.ApplyToAll(v.values, func(v string) string { + return `"` + v + `"` + })) +} + +func (v prefixNoneOfValidator) ValidateString(ctx context.Context, request validator.StringRequest, response *validator.StringResponse) { + if request.ConfigValue.IsNull() || request.ConfigValue.IsUnknown() { + return + } + + value := request.ConfigValue.ValueString() + + for _, otherValue := range v.values { + if !strings.HasPrefix(value, otherValue) { + continue + } + + response.Diagnostics.Append(validatordiag.InvalidAttributeValueMatchDiagnostic( + request.Path, + v.Description(ctx), + request.ConfigValue.String(), + )) + + break + } +} + +// PrefixNoneOf checks that the String held in the attribute +// begins with none of the given `values`. +func PrefixNoneOf(values ...string) prefixNoneOfValidator { + return prefixNoneOfValidator{ + values: slices.Clone(values), + } +} diff --git a/internal/framework/validators/prefix_none_of_test.go b/internal/framework/validators/prefix_none_of_test.go new file mode 100644 index 000000000000..a43b8804ebba --- /dev/null +++ b/internal/framework/validators/prefix_none_of_test.go @@ -0,0 +1,121 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package validators_test + +import ( + "context" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/framework/validators" +) + +func TestPrefixNoneOfValidator(t *testing.T) { + t.Parallel() + + type testCase struct { + in types.String + prefixNoneOfValues []string + expectError bool + } + + testCases := map[string]testCase{ + "simple-match": { + in: types.StringValue("prefix"), + prefixNoneOfValues: []string{ + "pre", + "first", + "1st", + }, + expectError: true, + }, + "simple-mismatch-case-insensitive": { + in: types.StringValue("prefix"), + prefixNoneOfValues: []string{ + "PRE", + "first", + "1st", + }, + }, + "simple-mismatch": { + in: types.StringValue("prefix"), + prefixNoneOfValues: []string{ + "pri", + "first", + "1st", + }, + }, + "skip-validation-on-null": { + in: types.StringNull(), + prefixNoneOfValues: []string{ + "pre", + "first", + "1st", + }, + }, + "skip-validation-on-unknown": { + in: types.StringUnknown(), + prefixNoneOfValues: []string{ + "pre", + "first", + "1st", + }, + }, + } + + for name, test := range testCases { + name, test := name, test + + t.Run(fmt.Sprintf("ValidateString - %s", name), func(t *testing.T) { + t.Parallel() + req := validator.StringRequest{ + ConfigValue: test.in, + } + res := validator.StringResponse{} + validators.PrefixNoneOf(test.prefixNoneOfValues...).ValidateString(context.TODO(), req, &res) + + if !res.Diagnostics.HasError() && test.expectError { + t.Fatal("expected error, got no error") + } + + if res.Diagnostics.HasError() && !test.expectError { + t.Fatalf("got unexpected error: %s", res.Diagnostics) + } + }) + } +} + +func TestPrefixNoneOfValidator_Description(t *testing.T) { + t.Parallel() + + type testCase struct { + in []string + expected string + } + + testCases := map[string]testCase{ + "quoted-once": { + in: []string{"foo", "bar", "baz"}, + expected: `value must begin with none of: ["foo" "bar" "baz"]`, + }, + } + + for name, test := range testCases { + name, test := name, test + t.Run(name, func(t *testing.T) { + t.Parallel() + + v := validators.PrefixNoneOf(test.in...) + + got := v.MarkdownDescription(context.Background()) + + if diff := cmp.Diff(got, test.expected); diff != "" { + t.Errorf("unexpected difference: %s", diff) + } + }) + } +} diff --git a/internal/framework/validators/suffix_none_of.go b/internal/framework/validators/suffix_none_of.go new file mode 100644 index 000000000000..5d96c7f6b0d2 --- /dev/null +++ b/internal/framework/validators/suffix_none_of.go @@ -0,0 +1,61 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package validators + +import ( + "context" + "fmt" + "slices" + "strings" + + "github.com/hashicorp/terraform-plugin-framework-validators/helpers/validatordiag" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" +) + +var _ validator.String = suffixNoneOfValidator{} + +type suffixNoneOfValidator struct { + values []string +} + +func (v suffixNoneOfValidator) Description(ctx context.Context) string { + return v.MarkdownDescription(ctx) +} + +func (v suffixNoneOfValidator) MarkdownDescription(_ context.Context) string { + return fmt.Sprintf("value must end with none of: %s", tfslices.ApplyToAll(v.values, func(v string) string { + return `"` + v + `"` + })) +} + +func (v suffixNoneOfValidator) ValidateString(ctx context.Context, request validator.StringRequest, response *validator.StringResponse) { + if request.ConfigValue.IsNull() || request.ConfigValue.IsUnknown() { + return + } + + value := request.ConfigValue.ValueString() + + for _, otherValue := range v.values { + if !strings.HasSuffix(value, otherValue) { + continue + } + + response.Diagnostics.Append(validatordiag.InvalidAttributeValueMatchDiagnostic( + request.Path, + v.Description(ctx), + request.ConfigValue.String(), + )) + + break + } +} + +// SuffixNoneOf checks that the String held in the attribute +// ends with none of the given `values`. +func SuffixNoneOf(values ...string) suffixNoneOfValidator { + return suffixNoneOfValidator{ + values: slices.Clone(values), + } +} diff --git a/internal/framework/validators/suffix_none_of_test.go b/internal/framework/validators/suffix_none_of_test.go new file mode 100644 index 000000000000..fe62d184b891 --- /dev/null +++ b/internal/framework/validators/suffix_none_of_test.go @@ -0,0 +1,116 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package validators_test + +import ( + "context" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/framework/validators" +) + +func TestSuffixNoneOfValidator(t *testing.T) { + t.Parallel() + + type testCase struct { + in types.String + suffixNoneOfValues []string + expectError bool + } + + testCases := map[string]testCase{ + "simple-match": { + in: types.StringValue("suffix"), + suffixNoneOfValues: []string{ + "fix", + "last", + }, + expectError: true, + }, + "simple-mismatch-case-insensitive": { + in: types.StringValue("suffix"), + suffixNoneOfValues: []string{ + "FIX", + "last", + }, + }, + "simple-mismatch": { + in: types.StringValue("suffix"), + suffixNoneOfValues: []string{ + "fax", + "last", + }, + }, + "skip-validation-on-null": { + in: types.StringNull(), + suffixNoneOfValues: []string{ + "fix", + "last", + }, + }, + "skip-validation-on-unknown": { + in: types.StringUnknown(), + suffixNoneOfValues: []string{ + "fix", + "last", + }, + }, + } + + for name, test := range testCases { + name, test := name, test + + t.Run(fmt.Sprintf("ValidateString - %s", name), func(t *testing.T) { + t.Parallel() + req := validator.StringRequest{ + ConfigValue: test.in, + } + res := validator.StringResponse{} + validators.SuffixNoneOf(test.suffixNoneOfValues...).ValidateString(context.TODO(), req, &res) + + if !res.Diagnostics.HasError() && test.expectError { + t.Fatal("expected error, got no error") + } + + if res.Diagnostics.HasError() && !test.expectError { + t.Fatalf("got unexpected error: %s", res.Diagnostics) + } + }) + } +} + +func TestSuffixNoneOfValidator_Description(t *testing.T) { + t.Parallel() + + type testCase struct { + in []string + expected string + } + + testCases := map[string]testCase{ + "quoted-once": { + in: []string{"foo", "bar", "baz"}, + expected: `value must end with none of: ["foo" "bar" "baz"]`, + }, + } + + for name, test := range testCases { + name, test := name, test + t.Run(name, func(t *testing.T) { + t.Parallel() + + v := validators.SuffixNoneOf(test.in...) 
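+			// Description delegates to MarkdownDescription, so asserting on the markdown form covers both methods.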
+ + got := v.MarkdownDescription(context.Background()) + + if diff := cmp.Diff(got, test.expected); diff != "" { + t.Errorf("unexpected difference: %s", diff) + } + }) + } +} From 394e4cfefba1481f65de8acae5a8b1e9514e130b Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Thu, 7 Nov 2024 14:47:26 -0800 Subject: [PATCH 03/35] Adds resource `aws_s3tables_table_bucket` --- go.mod | 2 +- internal/service/s3tables/exports_test.go | 14 ++ .../service/s3tables/service_package_gen.go | 7 +- internal/service/s3tables/table_bucket.go | 228 ++++++++++++++++++ .../service/s3tables/table_bucket_test.go | 155 ++++++++++++ .../r/s3tables_table_bucket.html.markdown | 55 +++++ 6 files changed, 459 insertions(+), 2 deletions(-) create mode 100644 internal/service/s3tables/exports_test.go create mode 100644 internal/service/s3tables/table_bucket.go create mode 100644 internal/service/s3tables/table_bucket_test.go create mode 100644 website/docs/r/s3tables_table_bucket.html.markdown diff --git a/go.mod b/go.mod index c609033e5394..9f0b9d71b37d 100644 --- a/go.mod +++ b/go.mod @@ -216,6 +216,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/s3 v1.71.0 github.com/aws/aws-sdk-go-v2/service/s3control v1.52.0 github.com/aws/aws-sdk-go-v2/service/s3outposts v1.28.7 + github.com/aws/aws-sdk-go-v2/service/s3tables v1.0.0 github.com/aws/aws-sdk-go-v2/service/sagemaker v1.168.1 github.com/aws/aws-sdk-go-v2/service/scheduler v1.12.7 github.com/aws/aws-sdk-go-v2/service/schemas v1.28.8 @@ -325,7 +326,6 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.6 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.6 // indirect - github.com/aws/aws-sdk-go-v2/service/s3tables v1.0.0 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 // indirect github.com/bgentry/speakeasy v0.2.0 // indirect github.com/boombuler/barcode v1.0.1 // indirect diff --git a/internal/service/s3tables/exports_test.go b/internal/service/s3tables/exports_test.go new file mode 100644 index 000000000000..fcf9a348cb5e --- /dev/null +++ b/internal/service/s3tables/exports_test.go @@ -0,0 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package s3tables + +var ( + NewResourceTableBucket = newResourceTableBucket + + FindTableBucket = findTableBucket +) + +const ( + ResNameTableBucket = resNameTableBucket +) diff --git a/internal/service/s3tables/service_package_gen.go b/internal/service/s3tables/service_package_gen.go index 2462828f3461..9722356e1fdc 100644 --- a/internal/service/s3tables/service_package_gen.go +++ b/internal/service/s3tables/service_package_gen.go @@ -19,7 +19,12 @@ func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*types.Serv } func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.ServicePackageFrameworkResource { - return []*types.ServicePackageFrameworkResource{} + return []*types.ServicePackageFrameworkResource{ + { + Factory: newResourceTableBucket, + Name: "Table Bucket", + }, + } } func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePackageSDKDataSource { diff --git a/internal/service/s3tables/table_bucket.go b/internal/service/s3tables/table_bucket.go new file mode 100644 index 000000000000..1c38b672da13 --- /dev/null +++ b/internal/service/s3tables/table_bucket.go @@ -0,0 +1,228 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package s3tables + +import ( + "context" + "errors" + + "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3tables" + awstypes "github.com/aws/aws-sdk-go-v2/service/s3tables/types" + "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + "github.com/hashicorp/terraform-provider-aws/internal/framework/validators" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource("aws_s3tables_table_bucket", name="Table Bucket") +func newResourceTableBucket(_ context.Context) (resource.ResourceWithConfigure, error) { + return &resourceTableBucket{}, nil +} + +const ( + resNameTableBucket = "Table Bucket" +) + +type resourceTableBucket struct { + framework.ResourceWithConfigure + framework.WithNoUpdate +} + +func (r *resourceTableBucket) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = "aws_s3tables_table_bucket" +} + +func (r *resourceTableBucket) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrARN: framework.ARNAttributeComputedOnly(), + names.AttrCreatedAt: schema.StringAttribute{ + CustomType: timetypes.RFC3339Type{}, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + names.AttrName: schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.LengthBetween(3, 63), + stringvalidator.RegexMatches(regexache.MustCompile(`^[0-9a-z-]*$`), "must contain only lowercase letters, numbers, or hyphens"), + stringvalidator.RegexMatches(regexache.MustCompile(`^[0-9a-z].*[0-9a-z]$`), "must start and end with a letter or number"), + validators.PrefixNoneOf( + "xn--", + "sthree-", + "sthree-configurator", + "amzn-s3-demo-", + ), + validators.SuffixNoneOf( + "-s3alias", + "--ol-s3", + ".mrap", + "--x-s3", + ), + }, + }, + names.AttrOwnerAccountID: schema.StringAttribute{ + Computed: true, + }, + }, + } +} + +func (r *resourceTableBucket) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + conn := r.Meta().S3TablesClient(ctx) + + var plan resourceTableBucketModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) 
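+	// Stop early if decoding the planned configuration into the model reported diagnostics.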
+ if resp.Diagnostics.HasError() { + return + } + + var input s3tables.CreateTableBucketInput + resp.Diagnostics.Append(flex.Expand(ctx, plan, &input)...) + if resp.Diagnostics.HasError() { + return + } + + out, err := conn.CreateTableBucket(ctx, &input) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, resNameTableBucket, plan.Name.String(), err), + err.Error(), + ) + return + } + if out == nil || out.Arn == nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, resNameTableBucket, plan.Name.String(), nil), + errors.New("empty output").Error(), + ) + return + } + + bucket, err := findTableBucket(ctx, conn, aws.ToString(out.Arn)) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, resNameTableBucket, plan.Name.String(), err), + err.Error(), + ) + } + + resp.Diagnostics.Append(flex.Flatten(ctx, bucket, &plan)...) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) +} + +func (r *resourceTableBucket) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + conn := r.Meta().S3TablesClient(ctx) + + var state resourceTableBucketModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return + } + + out, err := findTableBucket(ctx, conn, state.ARN.ValueString()) + if tfresource.NotFound(err) { + resp.State.RemoveResource(ctx) + return + } + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionSetting, resNameTableBucket, state.Name.String(), err), + err.Error(), + ) + return + } + + resp.Diagnostics.Append(flex.Flatten(ctx, out, &state)...) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) +} + +func (r *resourceTableBucket) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + conn := r.Meta().S3TablesClient(ctx) + + var state resourceTableBucketModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
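+	// The prior state provides the table bucket ARN used for the DeleteTableBucket call below.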
+ if resp.Diagnostics.HasError() { + return + } + + input := &s3tables.DeleteTableBucketInput{ + TableBucketARN: state.ARN.ValueStringPointer(), + } + + _, err := conn.DeleteTableBucket(ctx, input) + if errs.IsA[*awstypes.NotFoundException](err) { + return + } + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionDeleting, resNameTableBucket, state.Name.String(), err), + err.Error(), + ) + return + } +} + +func (r *resourceTableBucket) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + resource.ImportStatePassthroughID(ctx, path.Root(names.AttrARN), req, resp) +} + +func findTableBucket(ctx context.Context, conn *s3tables.Client, arn string) (*s3tables.GetTableBucketOutput, error) { + in := s3tables.GetTableBucketInput{ + TableBucketARN: aws.String(arn), + } + + out, err := conn.GetTableBucket(ctx, &in) + if err != nil { + if errs.IsA[*awstypes.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: in, + } + } + + return nil, err + } + + if out == nil { + return nil, tfresource.NewEmptyResultError(in) + } + + return out, nil +} + +type resourceTableBucketModel struct { + ARN types.String `tfsdk:"arn"` + CreatedAt timetypes.RFC3339 `tfsdk:"created_at"` + Name types.String `tfsdk:"name"` + OwnerAccountID types.String `tfsdk:"owner_account_id"` +} diff --git a/internal/service/s3tables/table_bucket_test.go b/internal/service/s3tables/table_bucket_test.go new file mode 100644 index 000000000000..d98ab6833378 --- /dev/null +++ b/internal/service/s3tables/table_bucket_test.go @@ -0,0 +1,155 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package s3tables_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/s3tables" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + tfs3tables "github.com/hashicorp/terraform-provider-aws/internal/service/s3tables" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccS3TablesTableBucket_basic(t *testing.T) { + ctx := acctest.Context(t) + + var tablebucket s3tables.GetTableBucketOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3tables_table_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.S3TablesServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTableBucketDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTableBucketConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTableBucketExists(ctx, resourceName, &tablebucket), + acctest.CheckResourceAttrRegionalARN(resourceName, names.AttrARN, "s3tables", "bucket/"+rName), + resource.TestCheckResourceAttrSet(resourceName, names.AttrCreatedAt), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + acctest.CheckResourceAttrAccountID(resourceName, names.AttrOwnerAccountID), + ), + }, + 
{ + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccS3TablesTableBucket_disappears(t *testing.T) { + ctx := acctest.Context(t) + + var tablebucket s3tables.GetTableBucketOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3tables_table_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.S3TablesServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTableBucketDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTableBucketConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTableBucketExists(ctx, resourceName, &tablebucket), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfs3tables.NewResourceTableBucket, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckTableBucketDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).S3TablesClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_s3tables_table_bucket" { + continue + } + + _, err := tfs3tables.FindTableBucket(ctx, conn, rs.Primary.Attributes[names.AttrARN]) + if tfresource.NotFound(err) { + return nil + } + if err != nil { + return create.Error(names.S3Tables, create.ErrActionCheckingDestroyed, tfs3tables.ResNameTableBucket, rs.Primary.ID, err) + } + + return create.Error(names.S3Tables, create.ErrActionCheckingDestroyed, tfs3tables.ResNameTableBucket, rs.Primary.ID, errors.New("not destroyed")) + } + + return nil + } +} + +func testAccCheckTableBucketExists(ctx context.Context, name string, tablebucket *s3tables.GetTableBucketOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameTableBucket, name, errors.New("not found")) + } + + if rs.Primary.Attributes[names.AttrARN] == "" { + return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameTableBucket, name, errors.New("not set")) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).S3TablesClient(ctx) + + resp, err := tfs3tables.FindTableBucket(ctx, conn, rs.Primary.Attributes[names.AttrARN]) + if err != nil { + return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameTableBucket, rs.Primary.ID, err) + } + + *tablebucket = *resp + + return nil + } +} + +func testAccPreCheck(ctx context.Context, t *testing.T) { + conn := acctest.Provider.Meta().(*conns.AWSClient).S3TablesClient(ctx) + + _, err := conn.ListTableBuckets(ctx, &s3tables.ListTableBucketsInput{}) + if acctest.PreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + +func testAccTableBucketConfig_basic(rName string) string { + return fmt.Sprintf(` +resource "aws_s3tables_table_bucket" "test" { + name = %[1]q +} +`, rName) +} diff --git a/website/docs/r/s3tables_table_bucket.html.markdown b/website/docs/r/s3tables_table_bucket.html.markdown new file mode 100644 index 
000000000000..09a22120a9af --- /dev/null +++ b/website/docs/r/s3tables_table_bucket.html.markdown @@ -0,0 +1,55 @@ +--- +subcategory: "S3 Tables" +layout: "aws" +page_title: "AWS: aws_s3tables_table_bucket" +description: |- + Terraform resource for managing an AWS S3 Tables Table Bucket. +--- + +# Resource: aws_s3tables_table_bucket + +Terraform resource for managing an AWS S3 Tables Table Bucket. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_s3tables_table_bucket" "example" { + name = "example-bucket" +} +``` + +## Argument Reference + +The following argument is required: + +* `name` - (Required, Forces new resource) Name of the table bucket. + Must be between 3 and 63 characters in length. + Can consist of lowercase letters, numbers, and hyphens, and must begin and end with a lowercase letter or number. + A full list of bucket naming rules may be found in [S3 Tables documentation](???). + +## Attribute Reference + +This resource exports the following attributes in addition to the argument above: + +* `arn` - ARN of the table bucket. +* `created_at` - Date and time when the bucket was created. +* `owner_account_id` - Account ID of the account that owns the table bucket. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 Tables Table Bucket using the `arn`. For example: + +```terraform +import { + to = aws_s3tables_table_bucket.example + id = "arn:aws:s3tables:us-west-2:123456789012:bucket/example-bucket" +} +``` + +Using `terraform import`, import S3 Tables Table Bucket using the `arn`. For example: + +```console +% terraform import aws_s3tables_table_bucket.example arn:aws:s3tables:us-west-2:123456789012:bucket/example-bucket +``` From f3005eaa8d759a58e799f23858e821c43f278aa1 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Thu, 7 Nov 2024 16:49:12 -0800 Subject: [PATCH 04/35] Updates `skaff` to use Finder functions in Exists and Destroy tests --- skaff/resource/resourcetest.gtpl | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/skaff/resource/resourcetest.gtpl b/skaff/resource/resourcetest.gtpl index 0bc28db06741..4b7585caf643 100644 --- a/skaff/resource/resourcetest.gtpl +++ b/skaff/resource/resourcetest.gtpl @@ -252,18 +252,15 @@ func testAccCheck{{ .Resource }}Destroy(ctx context.Context) resource.TestCheckF continue } - input := &{{ .SDKPackage }}.Describe{{ .Resource }}Input{ - {{ .Resource }}Id: aws.String(rs.Primary.ID), - } - - _, err := conn.Describe{{ .Resource }}(ctx, &{{ .SDKPackage }}.Describe{{ .Resource }}Input{ - {{ .Resource }}Id: aws.String(rs.Primary.ID), - }) - - if errs.IsA[*types.ResourceNotFoundException](err){ + {{ if .IncludeComments }} + // TIP: ==== FINDERS ==== + // The find function should be exported. Since it won't be used outside of the package, it can be exported + // in the `exports_test.go` file. 
+ {{- end }} + _, err := tf{{ .ServicePackage }}.Find{{ .Resource }}ByID(ctx, conn, rs.Primary.ID) + if tfresource.NotFound(err) { return nil } - if err != nil { return create.Error(names.{{ .Service }}, create.ErrActionCheckingDestroyed, tf{{ .ServicePackage }}.ResName{{ .Resource }}, rs.Primary.ID, err) } @@ -288,10 +285,7 @@ func testAccCheck{{ .Resource }}Exists(ctx context.Context, name string, {{ .Res conn := acctest.Provider.Meta().(*conns.AWSClient).{{ .Service }}Client(ctx) - resp, err := conn.Describe{{ .Resource }}(ctx, &{{ .SDKPackage }}.Describe{{ .Resource }}Input{ - {{ .Resource }}Id: aws.String(rs.Primary.ID), - }) - + resp, err := tf{{ .ServicePackage }}.Find{{ .Resource }}ByID(ctx, conn, rs.Primary.ID) if err != nil { return create.Error(names.{{ .Service }}, create.ErrActionCheckingExistence, tf{{ .ServicePackage }}.ResName{{ .Resource }}, rs.Primary.ID, err) } From ba56b11df1d73e6af57f510a3066a2af4f05cf6a Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Fri, 8 Nov 2024 14:46:40 -0800 Subject: [PATCH 05/35] `skaff`: Adds `ctx` parameter to `testAccPreCheck` call --- skaff/resource/resourcetest.gtpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/skaff/resource/resourcetest.gtpl b/skaff/resource/resourcetest.gtpl index 4b7585caf643..087c11dca964 100644 --- a/skaff/resource/resourcetest.gtpl +++ b/skaff/resource/resourcetest.gtpl @@ -213,7 +213,7 @@ func TestAcc{{ .Service }}{{ .Resource }}_disappears(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.{{ .Service }}EndpointID) - testAccPreCheck(t) + testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.{{ .Service }}ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, From b544c15a2a55488e972bdd483d5796185af4a077 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Fri, 8 Nov 2024 16:11:01 -0800 Subject: [PATCH 06/35] Removes unneeded indirection --- .../devopsguru/resource_collection_test.go | 52 +++++++++---------- 1 file changed, 25 insertions(+), 27 deletions(-) diff --git a/internal/service/devopsguru/resource_collection_test.go b/internal/service/devopsguru/resource_collection_test.go index 3e59fa3b7dca..aba224f5f205 100644 --- a/internal/service/devopsguru/resource_collection_test.go +++ b/internal/service/devopsguru/resource_collection_test.go @@ -82,7 +82,7 @@ func testAccResourceCollection_disappears(t *testing.T) { Config: testAccResourceCollectionConfig_basic(), Check: resource.ComposeTestCheckFunc( testAccCheckResourceCollectionExists(ctx, resourceName, &resourcecollection), - acctest.CheckFrameworkResourceDisappearsWithStateFunc(ctx, acctest.Provider, tfdevopsguru.ResourceResourceCollection, resourceName, resourceCollectionDisappearsStateFunc()), + acctest.CheckFrameworkResourceDisappearsWithStateFunc(ctx, acctest.Provider, tfdevopsguru.ResourceResourceCollection, resourceName, resourceCollectionDisappearsStateFunc), ), ExpectNonEmptyPlan: true, }, @@ -200,37 +200,35 @@ func testAccResourceCollection_tagsAllResources(t *testing.T) { }) } -func resourceCollectionDisappearsStateFunc() func(ctx context.Context, state *tfsdk.State, is *terraform.InstanceState) error { - return func(ctx context.Context, state *tfsdk.State, is *terraform.InstanceState) error { - if err := fwdiag.DiagnosticsError(state.SetAttribute(ctx, path.Root(names.AttrID), is.Attributes[names.AttrID])); err != nil { - return err - } - - // The delete operation requires passing in the configured array of stack names - // with a "REMOVE" 
action. Manually construct the root cloudformation attribute - // to match what is created by the _basic test configuration. - var diags diag.Diagnostics - attrType := map[string]attr.Type{"stack_names": fwtypes.ListType{ElemType: fwtypes.StringType}} - obj := map[string]attr.Value{ - "stack_names": flex.FlattenFrameworkStringValueList(ctx, []string{"*"}), - } - objVal, d := fwtypes.ObjectValue(attrType, obj) - diags.Append(d...) +func resourceCollectionDisappearsStateFunc(ctx context.Context, state *tfsdk.State, is *terraform.InstanceState) error { + if err := fwdiag.DiagnosticsError(state.SetAttribute(ctx, path.Root(names.AttrID), is.Attributes[names.AttrID])); err != nil { + return err + } - elemType := fwtypes.ObjectType{AttrTypes: attrType} - listVal, d := fwtypes.ListValue(elemType, []attr.Value{objVal}) - diags.Append(d...) + // The delete operation requires passing in the configured array of stack names + // with a "REMOVE" action. Manually construct the root cloudformation attribute + // to match what is created by the _basic test configuration. + var diags diag.Diagnostics + attrType := map[string]attr.Type{"stack_names": fwtypes.ListType{ElemType: fwtypes.StringType}} + obj := map[string]attr.Value{ + "stack_names": flex.FlattenFrameworkStringValueList(ctx, []string{"*"}), + } + objVal, d := fwtypes.ObjectValue(attrType, obj) + diags.Append(d...) - if diags.HasError() { - return fwdiag.DiagnosticsError(diags) - } + elemType := fwtypes.ObjectType{AttrTypes: attrType} + listVal, d := fwtypes.ListValue(elemType, []attr.Value{objVal}) + diags.Append(d...) - if err := fwdiag.DiagnosticsError(state.SetAttribute(ctx, path.Root("cloudformation"), listVal)); err != nil { - return err - } + if diags.HasError() { + return fwdiag.DiagnosticsError(diags) + } - return nil + if err := fwdiag.DiagnosticsError(state.SetAttribute(ctx, path.Root("cloudformation"), listVal)); err != nil { + return err } + + return nil } func testAccCheckResourceCollectionDestroy(ctx context.Context) resource.TestCheckFunc { From 7e619552bbb979fe784bbf4e6ecfea8402812c0f Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Fri, 8 Nov 2024 16:42:14 -0800 Subject: [PATCH 07/35] Adds resource `aws_s3tables_namespace` --- internal/service/s3tables/exports_test.go | 5 + internal/service/s3tables/namespace.go | 267 ++++++++++++++++++ internal/service/s3tables/namespace_test.go | 189 +++++++++++++ internal/service/s3tables/s3tables_test.go | 25 ++ .../service/s3tables/service_package_gen.go | 4 + .../service/s3tables/table_bucket_test.go | 12 - .../docs/r/s3tables_namespace.html.markdown | 61 ++++ 7 files changed, 551 insertions(+), 12 deletions(-) create mode 100644 internal/service/s3tables/namespace.go create mode 100644 internal/service/s3tables/namespace_test.go create mode 100644 internal/service/s3tables/s3tables_test.go create mode 100644 website/docs/r/s3tables_namespace.html.markdown diff --git a/internal/service/s3tables/exports_test.go b/internal/service/s3tables/exports_test.go index fcf9a348cb5e..d352d928b72d 100644 --- a/internal/service/s3tables/exports_test.go +++ b/internal/service/s3tables/exports_test.go @@ -4,11 +4,16 @@ package s3tables var ( + NewResourceNamespace = newResourceNamespace NewResourceTableBucket = newResourceTableBucket + FindNamespace = findNamespace FindTableBucket = findTableBucket ) const ( + ResNameNamespace = resNameNamespace ResNameTableBucket = resNameTableBucket + + NamespaceIDSeparator = namespaceIDSeparator ) diff --git a/internal/service/s3tables/namespace.go 
b/internal/service/s3tables/namespace.go new file mode 100644 index 000000000000..7bf04f6fb012 --- /dev/null +++ b/internal/service/s3tables/namespace.go @@ -0,0 +1,267 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package s3tables + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3tables" + awstypes "github.com/aws/aws-sdk-go-v2/service/s3tables/types" + "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource("aws_s3tables_namespace", name="Namespace") +func newResourceNamespace(_ context.Context) (resource.ResourceWithConfigure, error) { + return &resourceNamespace{}, nil +} + +const ( + resNameNamespace = "Namespace" +) + +type resourceNamespace struct { + framework.ResourceWithConfigure + framework.WithNoUpdate +} + +func (r *resourceNamespace) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = "aws_s3tables_namespace" +} +func (r *resourceNamespace) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrCreatedAt: schema.StringAttribute{ + CustomType: timetypes.RFC3339Type{}, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "created_by": schema.StringAttribute{ + Computed: true, + }, + names.AttrNamespace: schema.ListAttribute{ + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Required: true, + PlanModifiers: []planmodifier.List{ + listplanmodifier.RequiresReplace(), + }, + Validators: []validator.List{ + listvalidator.SizeAtLeast(1), + listvalidator.SizeAtMost(1), + listvalidator.ValueStringsAre( + stringvalidator.LengthBetween(1, 255), + stringvalidator.RegexMatches(regexache.MustCompile(`^[0-9a-z_]*$`), "must contain only lowercase letters, numbers, or underscores"), + stringvalidator.RegexMatches(regexache.MustCompile(`^[0-9a-z].*[0-9a-z]$`), "must start and end with a letter or number"), + ), + }, + }, + names.AttrOwnerAccountID: schema.StringAttribute{ + Computed: true, + }, + "table_bucket_arn": 
schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + } +} + +func (r *resourceNamespace) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + conn := r.Meta().S3TablesClient(ctx) + + var plan resourceNamespaceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + return + } + + var input s3tables.CreateNamespaceInput + resp.Diagnostics.Append(flex.Expand(ctx, plan, &input)...) + if resp.Diagnostics.HasError() { + return + } + + out, err := conn.CreateNamespace(ctx, &input) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, resNameNamespace, plan.Namespace.String(), err), + err.Error(), + ) + return + } + if out == nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, resNameNamespace, plan.Namespace.String(), nil), + errors.New("empty output").Error(), + ) + return + } + + namespace, err := findNamespace(ctx, conn, plan.TableBucketARN.ValueString(), out.Namespace[0]) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, resNameNamespace, plan.Namespace.String(), err), + err.Error(), + ) + return + } + + resp.Diagnostics.Append(flex.Flatten(ctx, namespace, &plan)...) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) +} + +func (r *resourceNamespace) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + conn := r.Meta().S3TablesClient(ctx) + + var state resourceNamespaceModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return + } + + var elements []string + state.Namespace.ElementsAs(ctx, &elements, false) + namespace := elements[0] + + out, err := findNamespace(ctx, conn, state.TableBucketARN.ValueString(), namespace) + if tfresource.NotFound(err) { + resp.State.RemoveResource(ctx) + return + } + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionSetting, resNameNamespace, state.Namespace.String(), err), + err.Error(), + ) + return + } + + resp.Diagnostics.Append(flex.Flatten(ctx, out, &state)...) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) +} + +func (r *resourceNamespace) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + conn := r.Meta().S3TablesClient(ctx) + + var state resourceNamespaceModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + return + } + + var elements []string + state.Namespace.ElementsAs(ctx, &elements, false) + namespace := elements[0] + + input := s3tables.DeleteNamespaceInput{ + Namespace: aws.String(namespace), + TableBucketARN: state.TableBucketARN.ValueStringPointer(), + } + + _, err := conn.DeleteNamespace(ctx, &input) + if err != nil { + if errs.IsA[*awstypes.NotFoundException](err) { + return + } + + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionDeleting, resNameNamespace, state.Namespace.String(), err), + err.Error(), + ) + return + } +} + +const namespaceIDSeparator = ";" + +func (r *resourceNamespace) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + parts := strings.Split(req.ID, namespaceIDSeparator) + if len(parts) != 2 || parts[0] == "" || parts[1] == "" { + resp.Diagnostics.AddError( + "Invalid Import ID", + "Import IDs for S3 Tables Namespaces must use the format "+namespaceIDSeparator+".\n"+ + fmt.Sprintf("Had %q", req.ID), + ) + return + } + + state := resourceNamespaceModel{ + TableBucketARN: fwtypes.ARNValue(parts[0]), + Namespace: fwtypes.NewListValueOfMust[types.String](ctx, []attr.Value{types.StringValue(parts[1])}), + } + + diags := resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} +func findNamespace(ctx context.Context, conn *s3tables.Client, bucketARN, name string) (*s3tables.GetNamespaceOutput, error) { + in := &s3tables.GetNamespaceInput{ + Namespace: aws.String(name), + TableBucketARN: aws.String(bucketARN), + } + + out, err := conn.GetNamespace(ctx, in) + if err != nil { + if errs.IsA[*awstypes.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: in, + } + } + + return nil, err + } + + if out == nil { + return nil, tfresource.NewEmptyResultError(in) + } + + return out, nil +} + +type resourceNamespaceModel struct { + CreatedAt timetypes.RFC3339 `tfsdk:"created_at"` + CreatedBy types.String `tfsdk:"created_by"` + Namespace fwtypes.ListValueOf[types.String] `tfsdk:"namespace"` + OwnerAccountID types.String `tfsdk:"owner_account_id"` + TableBucketARN fwtypes.ARN `tfsdk:"table_bucket_arn"` +} diff --git a/internal/service/s3tables/namespace_test.go b/internal/service/s3tables/namespace_test.go new file mode 100644 index 000000000000..e1a806e6cf58 --- /dev/null +++ b/internal/service/s3tables/namespace_test.go @@ -0,0 +1,189 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package s3tables_test + +import ( + "context" + "errors" + "fmt" + "strings" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/s3tables" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + tfs3tables "github.com/hashicorp/terraform-provider-aws/internal/service/s3tables" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccS3TablesNamespace_basic(t *testing.T) { + ctx := acctest.Context(t) + + var namespace s3tables.GetNamespaceOutput + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := strings.ReplaceAll(bucketName, "-", "_") + resourceName := "aws_s3tables_namespace.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.S3TablesServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckNamespaceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccNamespaceConfig_basic(rName, bucketName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckNamespaceExists(ctx, resourceName, &namespace), + resource.TestCheckResourceAttrSet(resourceName, names.AttrCreatedAt), + acctest.CheckResourceAttrAccountID(resourceName, "created_by"), + resource.TestCheckResourceAttr(resourceName, "namespace.#", "1"), + resource.TestCheckResourceAttr(resourceName, "namespace.0", rName), + acctest.CheckResourceAttrAccountID(resourceName, names.AttrOwnerAccountID), + resource.TestCheckResourceAttrPair(resourceName, "table_bucket_arn", "aws_s3tables_table_bucket.test", names.AttrARN), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccNamespaceImportStateIdFunc(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "namespace.0", + }, + }, + }) +} + +func TestAccS3TablesNamespace_disappears(t *testing.T) { + ctx := acctest.Context(t) + + var namespace s3tables.GetNamespaceOutput + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := strings.ReplaceAll(bucketName, "-", "_") + resourceName := "aws_s3tables_namespace.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.S3TablesServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckNamespaceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccNamespaceConfig_basic(rName, bucketName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckNamespaceExists(ctx, resourceName, &namespace), + acctest.CheckFrameworkResourceDisappearsWithStateFunc(ctx, acctest.Provider, tfs3tables.NewResourceNamespace, resourceName, namespaceDisappearsStateFunc), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckNamespaceDestroy(ctx 
context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).S3TablesClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_s3tables_namespace" { + continue + } + + _, err := tfs3tables.FindNamespace(ctx, conn, rs.Primary.Attributes["table_bucket_arn"], rs.Primary.Attributes["namespace.0"]) + if tfresource.NotFound(err) { + return nil + } + if err != nil { + return create.Error(names.S3Tables, create.ErrActionCheckingDestroyed, tfs3tables.ResNameNamespace, rs.Primary.ID, err) + } + + return create.Error(names.S3Tables, create.ErrActionCheckingDestroyed, tfs3tables.ResNameNamespace, rs.Primary.ID, errors.New("not destroyed")) + } + + return nil + } +} + +func testAccCheckNamespaceExists(ctx context.Context, name string, namespace *s3tables.GetNamespaceOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameNamespace, name, errors.New("not found")) + } + + if rs.Primary.Attributes["table_bucket_arn"] == "" || rs.Primary.Attributes["namespace.0"] == "" { + return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameNamespace, name, errors.New("not set")) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).S3TablesClient(ctx) + + resp, err := tfs3tables.FindNamespace(ctx, conn, rs.Primary.Attributes["table_bucket_arn"], rs.Primary.Attributes["namespace.0"]) + if err != nil { + return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameNamespace, rs.Primary.ID, err) + } + + *namespace = *resp + + return nil + } +} + +func testAccNamespaceImportStateIdFunc(resourceName string) resource.ImportStateIdFunc { + return func(s *terraform.State) (string, error) { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return "", fmt.Errorf("not found: %s", resourceName) + } + + return rs.Primary.Attributes["table_bucket_arn"] + tfs3tables.NamespaceIDSeparator + rs.Primary.Attributes["namespace.0"], nil + } +} + +func namespaceDisappearsStateFunc(ctx context.Context, state *tfsdk.State, is *terraform.InstanceState) error { + v, ok := is.Attributes["namespace.0"] + if !ok { + return errors.New(`Identifying attribute "namespace.0" not defined`) + } + + if err := fwdiag.DiagnosticsError(state.SetAttribute(ctx, path.Root(names.AttrNamespace), []string{v})); err != nil { + return err + } + + v, ok = is.Attributes["table_bucket_arn"] + if !ok { + return errors.New(`Identifying attribute "table_bucket_arn" not defined`) + } + + if err := fwdiag.DiagnosticsError(state.SetAttribute(ctx, path.Root("table_bucket_arn"), v)); err != nil { + return err + } + + return nil +} + +func testAccNamespaceConfig_basic(rName, bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3tables_namespace" "test" { + namespace = [%[1]q] + table_bucket_arn = aws_s3tables_table_bucket.test.arn +} + +resource "aws_s3tables_table_bucket" "test" { + name = %[2]q +} +`, rName, bucketName) +} diff --git a/internal/service/s3tables/s3tables_test.go b/internal/service/s3tables/s3tables_test.go new file mode 100644 index 000000000000..84a81b3361c3 --- /dev/null +++ b/internal/service/s3tables/s3tables_test.go @@ -0,0 +1,25 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package s3tables_test + +import ( + "context" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/s3tables" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" +) + +func testAccPreCheck(ctx context.Context, t *testing.T) { + conn := acctest.Provider.Meta().(*conns.AWSClient).S3TablesClient(ctx) + + _, err := conn.ListTableBuckets(ctx, &s3tables.ListTableBucketsInput{}) + if acctest.PreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} diff --git a/internal/service/s3tables/service_package_gen.go b/internal/service/s3tables/service_package_gen.go index 9722356e1fdc..79826ccd8257 100644 --- a/internal/service/s3tables/service_package_gen.go +++ b/internal/service/s3tables/service_package_gen.go @@ -20,6 +20,10 @@ func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*types.Serv func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.ServicePackageFrameworkResource { return []*types.ServicePackageFrameworkResource{ + { + Factory: newResourceNamespace, + Name: "Namespace", + }, { Factory: newResourceTableBucket, Name: "Table Bucket", diff --git a/internal/service/s3tables/table_bucket_test.go b/internal/service/s3tables/table_bucket_test.go index d98ab6833378..b44319222305 100644 --- a/internal/service/s3tables/table_bucket_test.go +++ b/internal/service/s3tables/table_bucket_test.go @@ -134,18 +134,6 @@ func testAccCheckTableBucketExists(ctx context.Context, name string, tablebucket } } -func testAccPreCheck(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).S3TablesClient(ctx) - - _, err := conn.ListTableBuckets(ctx, &s3tables.ListTableBucketsInput{}) - if acctest.PreCheckSkipError(err) { - t.Skipf("skipping acceptance testing: %s", err) - } - if err != nil { - t.Fatalf("unexpected PreCheck error: %s", err) - } -} - func testAccTableBucketConfig_basic(rName string) string { return fmt.Sprintf(` resource "aws_s3tables_table_bucket" "test" { diff --git a/website/docs/r/s3tables_namespace.html.markdown b/website/docs/r/s3tables_namespace.html.markdown new file mode 100644 index 000000000000..9919b941bc8d --- /dev/null +++ b/website/docs/r/s3tables_namespace.html.markdown @@ -0,0 +1,61 @@ +--- +subcategory: "S3 Tables" +layout: "aws" +page_title: "AWS: aws_s3tables_namespace" +description: |- + Terraform resource for managing an AWS S3 Tables Namespace. +--- + +# Resource: aws_s3tables_namespace + +Terraform resource for managing an AWS S3 Tables Namespace. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_s3tables_namespace" "example" { + namespace = ["example-namespace"] + table_bucket_arn = aws_s3tables_table_bucket.example.arn +} + +resource "aws_s3tables_table_bucket" "example" { + name = "example-bucket" +} +``` + +## Argument Reference + +The following arguments are required: + +* `namespace` - (Required, Forces new resource) Name of the namespace. + Note that this is a list with a maximum size of 1. + Must be between 1 and 255 characters in length. + Can consist of lowercase letters, numbers, and underscores, and must begin and end with a lowercase letter or number. +* `table_bucket_arn` - (Required, Forces new resource) ARN referencing the Table Bucket that contains this Namespace. 
+ +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `created_at` - Date and time when the namespace was created. +* `created_by` - Account ID of the account that created the namespace. +* `owner_account_id` - Account ID of the account that owns the namespace. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 Tables Namespace using the `table_bucket_arn` and the value of `namespace`, separated by a semicolon (`;`). For example: + +```terraform +import { + to = aws_s3tables_namespace.example + id = "arn:aws:s3tables:us-west-2:123456789012:bucket/example-bucket;example-namespace" +} +``` + +Using `terraform import`, import S3 Tables Namespace using the `table_bucket_arn` and the value of `namespace`, separated by a semicolon (`;`). For example: + +```console +% terraform import aws_s3tables_namespace.example 'arn:aws:s3tables:us-west-2:123456789012:bucket/example-bucket;example-namespace' +``` From 8f015fddcf201db298f5d29cd29063cf34f5b045 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 12 Nov 2024 17:07:00 -0800 Subject: [PATCH 08/35] Adds resource `aws_s3tables_table_bucket_policy` --- internal/service/s3tables/exports_test.go | 10 +- .../service/s3tables/service_package_gen.go | 4 + .../service/s3tables/table_bucket_policy.go | 229 ++++++++++++++++++ .../s3tables/table_bucket_policy_test.go | 163 +++++++++++++ ...s3tables_table_bucket_policy.html.markdown | 56 +++++ 5 files changed, 458 insertions(+), 4 deletions(-) create mode 100644 internal/service/s3tables/table_bucket_policy.go create mode 100644 internal/service/s3tables/table_bucket_policy_test.go create mode 100644 website/docs/r/s3tables_table_bucket_policy.html.markdown diff --git a/internal/service/s3tables/exports_test.go b/internal/service/s3tables/exports_test.go index d352d928b72d..8f2dae544232 100644 --- a/internal/service/s3tables/exports_test.go +++ b/internal/service/s3tables/exports_test.go @@ -4,11 +4,13 @@ package s3tables var ( - NewResourceNamespace = newResourceNamespace - NewResourceTableBucket = newResourceTableBucket + NewResourceNamespace = newResourceNamespace + NewResourceTableBucket = newResourceTableBucket + NewResourceTableBucketPolicy = newResourceTableBucketPolicy - FindNamespace = findNamespace - FindTableBucket = findTableBucket + FindNamespace = findNamespace + FindTableBucket = findTableBucket + FindTableBucketPolicy = findTableBucketPolicy ) const ( diff --git a/internal/service/s3tables/service_package_gen.go b/internal/service/s3tables/service_package_gen.go index 79826ccd8257..6b0b8bce0a63 100644 --- a/internal/service/s3tables/service_package_gen.go +++ b/internal/service/s3tables/service_package_gen.go @@ -28,6 +28,10 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.Servic Factory: newResourceTableBucket, Name: "Table Bucket", }, + { + Factory: newResourceTableBucketPolicy, + Name: "Table Bucket Policy", + }, } } diff --git a/internal/service/s3tables/table_bucket_policy.go b/internal/service/s3tables/table_bucket_policy.go new file mode 100644 index 000000000000..7b754a695856 --- /dev/null +++ b/internal/service/s3tables/table_bucket_policy.go @@ -0,0 +1,229 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package s3tables + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3tables" + awstypes "github.com/aws/aws-sdk-go-v2/service/s3tables/types" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource("aws_s3tables_table_bucket_policy", name="Table Bucket Policy") +func newResourceTableBucketPolicy(_ context.Context) (resource.ResourceWithConfigure, error) { + return &resourceTableBucketPolicy{}, nil +} + +const ( + ResNameTableBucketPolicy = "Table Bucket Policy" +) + +type resourceTableBucketPolicy struct { + framework.ResourceWithConfigure + framework.WithTimeouts +} + +func (r *resourceTableBucketPolicy) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = "aws_s3tables_table_bucket_policy" +} + +func (r *resourceTableBucketPolicy) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "resource_policy": schema.StringAttribute{ + CustomType: fwtypes.IAMPolicyType, + Required: true, + }, + "table_bucket_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + } +} + +func (r *resourceTableBucketPolicy) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + conn := r.Meta().S3TablesClient(ctx) + + var plan resourceTableBucketPolicyModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + return + } + + var input s3tables.PutTableBucketPolicyInput + resp.Diagnostics.Append(flex.Expand(ctx, plan, &input)...) + if resp.Diagnostics.HasError() { + return + } + + _, err := conn.PutTableBucketPolicy(ctx, &input) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, ResNameTableBucketPolicy, plan.TableBucketARN.String(), err), + err.Error(), + ) + return + } + + out, err := findTableBucketPolicy(ctx, conn, plan.TableBucketARN.ValueString()) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, ResNameTableBucketPolicy, plan.TableBucketARN.String(), err), + err.Error(), + ) + return + } + + resp.Diagnostics.Append(flex.Flatten(ctx, out, &plan)...) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) 
+} + +func (r *resourceTableBucketPolicy) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + conn := r.Meta().S3TablesClient(ctx) + + var state resourceTableBucketPolicyModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return + } + + out, err := findTableBucketPolicy(ctx, conn, state.TableBucketARN.ValueString()) + if tfresource.NotFound(err) { + resp.State.RemoveResource(ctx) + return + } + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionSetting, ResNameTableBucketPolicy, state.TableBucketARN.String(), err), + err.Error(), + ) + return + } + + resp.Diagnostics.Append(flex.Flatten(ctx, out, &state)...) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) +} + +func (r *resourceTableBucketPolicy) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + conn := r.Meta().S3TablesClient(ctx) + + var plan resourceTableBucketPolicyModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + return + } + + var input s3tables.PutTableBucketPolicyInput + resp.Diagnostics.Append(flex.Expand(ctx, plan, &input)...) + if resp.Diagnostics.HasError() { + return + } + + _, err := conn.PutTableBucketPolicy(ctx, &input) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, ResNameTableBucketPolicy, plan.TableBucketARN.String(), err), + err.Error(), + ) + return + } + + out, err := findTableBucketPolicy(ctx, conn, plan.TableBucketARN.ValueString()) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, ResNameTableBucketPolicy, plan.TableBucketARN.String(), err), + err.Error(), + ) + return + } + + resp.Diagnostics.Append(flex.Flatten(ctx, out, &plan)...) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) +} + +func (r *resourceTableBucketPolicy) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + conn := r.Meta().S3TablesClient(ctx) + + var state resourceTableBucketPolicyModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + return + } + + input := s3tables.DeleteTableBucketPolicyInput{ + TableBucketARN: state.TableBucketARN.ValueStringPointer(), + } + + _, err := conn.DeleteTableBucketPolicy(ctx, &input) + if err != nil { + if errs.IsA[*awstypes.NotFoundException](err) { + return + } + + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionDeleting, ResNameTableBucketPolicy, state.TableBucketARN.String(), err), + err.Error(), + ) + return + } +} + +func (r *resourceTableBucketPolicy) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + resource.ImportStatePassthroughID(ctx, path.Root("table_bucket_arn"), req, resp) +} + +func findTableBucketPolicy(ctx context.Context, conn *s3tables.Client, tableBucketARN string) (*s3tables.GetTableBucketPolicyOutput, error) { + in := &s3tables.GetTableBucketPolicyInput{ + TableBucketARN: aws.String(tableBucketARN), + } + + out, err := conn.GetTableBucketPolicy(ctx, in) + if err != nil { + if errs.IsA[*awstypes.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: in, + } + } + + return nil, err + } + + return out, nil +} + +type resourceTableBucketPolicyModel struct { + ResourcePolicy fwtypes.IAMPolicy `tfsdk:"resource_policy"` + TableBucketARN fwtypes.ARN `tfsdk:"table_bucket_arn"` +} diff --git a/internal/service/s3tables/table_bucket_policy_test.go b/internal/service/s3tables/table_bucket_policy_test.go new file mode 100644 index 000000000000..573b3827e683 --- /dev/null +++ b/internal/service/s3tables/table_bucket_policy_test.go @@ -0,0 +1,163 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package s3tables_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/s3tables" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + tfs3tables "github.com/hashicorp/terraform-provider-aws/internal/service/s3tables" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccS3TablesTableBucketPolicy_basic(t *testing.T) { + ctx := acctest.Context(t) + + var tablebucketpolicy s3tables.GetTableBucketPolicyOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3tables_table_bucket_policy.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.S3TablesServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTableBucketPolicyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTableBucketPolicyConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckTableBucketPolicyExists(ctx, resourceName, &tablebucketpolicy), + resource.TestCheckResourceAttrSet(resourceName, "resource_policy"), + resource.TestCheckResourceAttrPair(resourceName, "table_bucket_arn", "aws_s3tables_table_bucket.test", names.AttrARN), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: 
acctest.AttrImportStateIdFunc(resourceName, "table_bucket_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "table_bucket_arn", + ImportStateVerifyIgnore: []string{"resource_policy"}, + }, + }, + }) +} + +func TestAccS3TablesTableBucketPolicy_disappears(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var tablebucketpolicy s3tables.GetTableBucketPolicyOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3tables_table_bucket_policy.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.S3TablesServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTableBucketPolicyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTableBucketPolicyConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckTableBucketPolicyExists(ctx, resourceName, &tablebucketpolicy), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfs3tables.NewResourceTableBucketPolicy, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckTableBucketPolicyDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).S3TablesClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_s3tables_table_bucket_policy" { + continue + } + + _, err := tfs3tables.FindTableBucketPolicy(ctx, conn, rs.Primary.Attributes["table_bucket_arn"]) + if tfresource.NotFound(err) { + return nil + } + if err != nil { + return create.Error(names.S3Tables, create.ErrActionCheckingDestroyed, tfs3tables.ResNameTableBucketPolicy, rs.Primary.ID, err) + } + + return create.Error(names.S3Tables, create.ErrActionCheckingDestroyed, tfs3tables.ResNameTableBucketPolicy, rs.Primary.ID, errors.New("not destroyed")) + } + + return nil + } +} + +func testAccCheckTableBucketPolicyExists(ctx context.Context, name string, tablebucketpolicy *s3tables.GetTableBucketPolicyOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameTableBucketPolicy, name, errors.New("not found")) + } + + if rs.Primary.Attributes["table_bucket_arn"] == "" { + return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameTableBucketPolicy, name, errors.New("not set")) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).S3TablesClient(ctx) + + resp, err := tfs3tables.FindTableBucketPolicy(ctx, conn, rs.Primary.Attributes["table_bucket_arn"]) + if err != nil { + return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameTableBucketPolicy, rs.Primary.ID, err) + } + + *tablebucketpolicy = *resp + + return nil + } +} + +func testAccTableBucketPolicyConfig_basic(rName string) string { + return fmt.Sprintf(` +resource "aws_s3tables_table_bucket_policy" "test" { + resource_policy = data.aws_iam_policy_document.test.json + table_bucket_arn = aws_s3tables_table_bucket.test.arn +} + +data "aws_iam_policy_document" "test" { + statement { + actions = ["s3tables:*"] + principals { + type = "AWS" + identifiers = [data.aws_caller_identity.current.account_id] + } + resources = 
["${aws_s3tables_table_bucket.test.arn}/*"] + } +} + +resource "aws_s3tables_table_bucket" "test" { + name = %[1]q +} + +data "aws_caller_identity" "current" {} +`, rName) +} diff --git a/website/docs/r/s3tables_table_bucket_policy.html.markdown b/website/docs/r/s3tables_table_bucket_policy.html.markdown new file mode 100644 index 000000000000..ded16afbddd4 --- /dev/null +++ b/website/docs/r/s3tables_table_bucket_policy.html.markdown @@ -0,0 +1,56 @@ +--- +subcategory: "S3 Tables" +layout: "aws" +page_title: "AWS: aws_s3tables_table_bucket_policy" +description: |- + Terraform resource for managing an AWS S3 Tables Table Bucket Policy. +--- + +# Resource: aws_s3tables_table_bucket_policy + +Terraform resource for managing an AWS S3 Tables Table Bucket Policy. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_s3tables_table_bucket_policy" "example" { + resource_policy = data.aws_iam_policy_document.example.json + table_bucket_arn = aws_s3tables_table_bucket.example.arn +} + +data "aws_iam_policy_document" "example" { + statement { + # ... + } +} + +resource "aws_s3tables_table_bucket" "test" { + name = "example-bucket" +} +``` + +## Argument Reference + +The following arguments are required: + +* `resource_policy` - (Required) Amazon Web Services resource-based policy document in JSON format. +* `table_bucket_arn` - (Required, Forces new resource) ARN referencing the Table Bucket that owns this policy. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 Tables Table Bucket Policy using the `table_bucket_arn`. For example: + +```terraform +import { + to = aws_s3tables_table_bucket_policy.example + id = "arn:aws:s3tables:us-west-2:123456789012:bucket/example-bucket;example-namespace" +} +``` + +Using `terraform import`, import S3 Tables Table Bucket Policy using the `table_bucket_arn`. For example: + +```console +% terraform import aws_s3tables_table_bucket_policy.example 'arn:aws:s3tables:us-west-2:123456789012:bucket/example-bucket;example-namespace' +``` From 7297abaf2b447cbb47c2ed02d74bab8db94e6a96 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 12 Nov 2024 17:40:12 -0800 Subject: [PATCH 09/35] Adds sweeper for `aws_s3tables_table_bucket` --- internal/service/s3tables/sweep.go | 42 +++++++++++++++++++++++++++++ internal/sweep/register_gen_test.go | 2 ++ 2 files changed, 44 insertions(+) create mode 100644 internal/service/s3tables/sweep.go diff --git a/internal/service/s3tables/sweep.go b/internal/service/s3tables/sweep.go new file mode 100644 index 000000000000..b0a54ece30a8 --- /dev/null +++ b/internal/service/s3tables/sweep.go @@ -0,0 +1,42 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package s3tables + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3tables" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/sweep" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/framework" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func RegisterSweepers() { + awsv2.Register("aws_s3tables_table_bucket", sweepTableBuckets) +} + +func sweepTableBuckets(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { + conn := client.S3TablesClient(ctx) + + var sweepResources []sweep.Sweepable + + pages := s3tables.NewListTableBucketsPaginator(conn, &s3tables.ListTableBucketsInput{}) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if err != nil { + return nil, err + } + + for _, bucket := range page.TableBuckets { + sweepResources = append(sweepResources, framework.NewSweepResource(newResourceTableBucket, client, + framework.NewAttribute(names.AttrARN, aws.ToString(bucket.Arn)), + )) + } + } + + return sweepResources, nil +} diff --git a/internal/sweep/register_gen_test.go b/internal/sweep/register_gen_test.go index cddf0cb01ce3..ee8cde72556c 100644 --- a/internal/sweep/register_gen_test.go +++ b/internal/sweep/register_gen_test.go @@ -134,6 +134,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/service/rum" "github.com/hashicorp/terraform-provider-aws/internal/service/s3" "github.com/hashicorp/terraform-provider-aws/internal/service/s3control" + "github.com/hashicorp/terraform-provider-aws/internal/service/s3tables" "github.com/hashicorp/terraform-provider-aws/internal/service/sagemaker" "github.com/hashicorp/terraform-provider-aws/internal/service/scheduler" "github.com/hashicorp/terraform-provider-aws/internal/service/schemas" @@ -301,6 +302,7 @@ func registerSweepers() { rum.RegisterSweepers() s3.RegisterSweepers() s3control.RegisterSweepers() + s3tables.RegisterSweepers() sagemaker.RegisterSweepers() scheduler.RegisterSweepers() schemas.RegisterSweepers() From 0a28a6b91901861e735c61755edd401b71356509 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 19 Nov 2024 13:52:50 -0800 Subject: [PATCH 10/35] Focuses semgrep message on offending import line --- .ci/semgrep/aws/go-sdk-v1.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.ci/semgrep/aws/go-sdk-v1.yml b/.ci/semgrep/aws/go-sdk-v1.yml index 211ff4e9ba0d..fd96c53d2331 100644 --- a/.ci/semgrep/aws/go-sdk-v1.yml +++ b/.ci/semgrep/aws/go-sdk-v1.yml @@ -14,6 +14,7 @@ rules: - metavariable-regex: metavariable: "$X" regex: '^github.com/aws/aws-sdk-go/.+$' + - focus-metavariable: $X severity: WARNING - id: aws-sdk-go-base-awsv1shim-imports From 2fb02180cd1a9a3b482e0cb9db765bc537273b2d Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 20 Nov 2024 11:05:47 -0800 Subject: [PATCH 11/35] Adds resource `aws_s3tables_table` --- internal/service/s3tables/exports_test.go | 8 + internal/service/s3tables/namespace.go | 7 + internal/service/s3tables/namespace_test.go | 4 +- .../service/s3tables/service_package_gen.go | 4 + internal/service/s3tables/table.go | 413 ++++++++++++++++ .../service/s3tables/table_bucket_test.go | 4 +- internal/service/s3tables/table_test.go | 461 ++++++++++++++++++ website/docs/r/s3tables_table.html.markdown | 82 ++++ 8 files changed, 979 insertions(+), 4 deletions(-) create mode 100644 
internal/service/s3tables/table.go create mode 100644 internal/service/s3tables/table_test.go create mode 100644 website/docs/r/s3tables_table.html.markdown diff --git a/internal/service/s3tables/exports_test.go b/internal/service/s3tables/exports_test.go index 8f2dae544232..4df1d54bbd76 100644 --- a/internal/service/s3tables/exports_test.go +++ b/internal/service/s3tables/exports_test.go @@ -5,12 +5,16 @@ package s3tables var ( NewResourceNamespace = newResourceNamespace + NewResourceTable = newResourceTable NewResourceTableBucket = newResourceTableBucket NewResourceTableBucketPolicy = newResourceTableBucketPolicy FindNamespace = findNamespace + FindTable = findTable FindTableBucket = findTableBucket FindTableBucketPolicy = findTableBucketPolicy + + TableIDFromTableARN = tableIDFromTableARN ) const ( @@ -19,3 +23,7 @@ const ( NamespaceIDSeparator = namespaceIDSeparator ) + +type ( + TableIdentifier = tableIdentifier +) diff --git a/internal/service/s3tables/namespace.go b/internal/service/s3tables/namespace.go index 7bf04f6fb012..dd5e75edde76 100644 --- a/internal/service/s3tables/namespace.go +++ b/internal/service/s3tables/namespace.go @@ -51,6 +51,7 @@ type resourceNamespace struct { func (r *resourceNamespace) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { resp.TypeName = "aws_s3tables_namespace" } + func (r *resourceNamespace) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { resp.Schema = schema.Schema{ Attributes: map[string]schema.Attribute{ @@ -63,6 +64,9 @@ func (r *resourceNamespace) Schema(ctx context.Context, req resource.SchemaReque }, "created_by": schema.StringAttribute{ Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, }, names.AttrNamespace: schema.ListAttribute{ CustomType: fwtypes.ListOfStringType, @@ -83,6 +87,9 @@ func (r *resourceNamespace) Schema(ctx context.Context, req resource.SchemaReque }, names.AttrOwnerAccountID: schema.StringAttribute{ Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, }, "table_bucket_arn": schema.StringAttribute{ CustomType: fwtypes.ARNType, diff --git a/internal/service/s3tables/namespace_test.go b/internal/service/s3tables/namespace_test.go index e1a806e6cf58..d46f30f47356 100644 --- a/internal/service/s3tables/namespace_test.go +++ b/internal/service/s3tables/namespace_test.go @@ -47,10 +47,10 @@ func TestAccS3TablesNamespace_basic(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( testAccCheckNamespaceExists(ctx, resourceName, &namespace), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreatedAt), - acctest.CheckResourceAttrAccountID(resourceName, "created_by"), + acctest.CheckResourceAttrAccountID(ctx, resourceName, "created_by"), resource.TestCheckResourceAttr(resourceName, "namespace.#", "1"), resource.TestCheckResourceAttr(resourceName, "namespace.0", rName), - acctest.CheckResourceAttrAccountID(resourceName, names.AttrOwnerAccountID), + acctest.CheckResourceAttrAccountID(ctx, resourceName, names.AttrOwnerAccountID), resource.TestCheckResourceAttrPair(resourceName, "table_bucket_arn", "aws_s3tables_table_bucket.test", names.AttrARN), ), }, diff --git a/internal/service/s3tables/service_package_gen.go b/internal/service/s3tables/service_package_gen.go index 6b0b8bce0a63..1d26460b5221 100644 --- a/internal/service/s3tables/service_package_gen.go +++ b/internal/service/s3tables/service_package_gen.go @@ -24,6 +24,10 @@ 
func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.Servic Factory: newResourceNamespace, Name: "Namespace", }, + { + Factory: newResourceTable, + Name: "Table", + }, { Factory: newResourceTableBucket, Name: "Table Bucket", diff --git a/internal/service/s3tables/table.go b/internal/service/s3tables/table.go new file mode 100644 index 000000000000..092fa2ac6cc9 --- /dev/null +++ b/internal/service/s3tables/table.go @@ -0,0 +1,413 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package s3tables + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" + "github.com/aws/aws-sdk-go-v2/service/s3tables" + awstypes "github.com/aws/aws-sdk-go-v2/service/s3tables/types" + "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource("aws_s3tables_table", name="Table") +func newResourceTable(_ context.Context) (resource.ResourceWithConfigure, error) { + return &resourceTable{}, nil +} + +const ( + ResNameTable = "Table" +) + +type resourceTable struct { + framework.ResourceWithConfigure +} + +func (r *resourceTable) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = "aws_s3tables_table" +} + +func (r *resourceTable) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrARN: framework.ARNAttributeComputedOnly(), + names.AttrCreatedAt: schema.StringAttribute{ + CustomType: timetypes.RFC3339Type{}, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "created_by": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + names.AttrFormat: schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.OpenTableFormat](), + Required: true, + // TODO: Only one format is currently supported. 
When a new value is added, we can determine if `format` can be changed in-place or must recreate the resource + }, + "metadata_location": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "modified_at": schema.StringAttribute{ + CustomType: timetypes.RFC3339Type{}, + Computed: true, + }, + "modified_by": schema.StringAttribute{ + Computed: true, + }, + names.AttrName: schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 255), + stringvalidator.RegexMatches(regexache.MustCompile(`^[0-9a-z_]+$`), "must contain only lowercase letters, numbers, or underscores"), + stringvalidator.RegexMatches(regexache.MustCompile(`^[0-9a-z]`), "must start with a letter or number"), + stringvalidator.RegexMatches(regexache.MustCompile(`[0-9a-z]$`), "must end with a letter or number"), + }, + }, + names.AttrNamespace: schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 255), + stringvalidator.RegexMatches(regexache.MustCompile(`^[0-9a-z_]+$`), "must contain only lowercase letters, numbers, or underscores"), + stringvalidator.RegexMatches(regexache.MustCompile(`^[0-9a-z]`), "must start with a letter or number"), + stringvalidator.RegexMatches(regexache.MustCompile(`[0-9a-z]$`), "must end with a letter or number"), + }, + }, + names.AttrOwnerAccountID: schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "table_bucket_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + names.AttrType: schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.TableType](), + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "version_token": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "warehouse_location": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + }, + } +} + +func (r *resourceTable) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + conn := r.Meta().S3TablesClient(ctx) + + var plan resourceTableModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + return + } + + var input s3tables.CreateTableInput + resp.Diagnostics.Append(flex.Expand(ctx, plan, &input)...) + if resp.Diagnostics.HasError() { + return + } + input.Namespace = plan.Namespace.ValueStringPointer() + + _, err := conn.CreateTable(ctx, &input) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, ResNameTable, plan.Name.String(), err), + err.Error(), + ) + return + } + + table, err := findTable(ctx, conn, plan.TableBucketARN.ValueString(), plan.Namespace.ValueString(), plan.Name.ValueString()) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, ResNameTable, plan.Name.String(), err), + err.Error(), + ) + return + } + + resp.Diagnostics.Append(flex.Flatten(ctx, table, &plan, flex.WithFieldNamePrefix("Table"))...) 
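+	// GetTable reports the namespace as a list, while this resource models
+	// `namespace` as a plain string (autoflex is skipped for that field), so the
+	// first element is copied over from the API response manually below.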
+ if resp.Diagnostics.HasError() { + return + } + plan.Namespace = types.StringValue(table.Namespace[0]) + + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) +} + +func (r *resourceTable) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + conn := r.Meta().S3TablesClient(ctx) + + var state resourceTableModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return + } + + out, err := findTable(ctx, conn, state.TableBucketARN.ValueString(), state.Namespace.ValueString(), state.Name.ValueString()) + if tfresource.NotFound(err) { + resp.State.RemoveResource(ctx) + return + } + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionSetting, ResNameTable, state.Name.String(), err), + err.Error(), + ) + return + } + + resp.Diagnostics.Append(flex.Flatten(ctx, out, &state, flex.WithFieldNamePrefix("Table"))...) + if resp.Diagnostics.HasError() { + return + } + state.Namespace = types.StringValue(out.Namespace[0]) + + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) +} + +func (r *resourceTable) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + conn := r.Meta().S3TablesClient(ctx) + + var plan, state resourceTableModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return + } + + if !plan.Name.Equal(state.Name) || !plan.Namespace.Equal(state.Namespace) { + input := s3tables.RenameTableInput{ + TableBucketARN: state.TableBucketARN.ValueStringPointer(), + Namespace: state.Namespace.ValueStringPointer(), + Name: state.Name.ValueStringPointer(), + } + + if !plan.Name.Equal(state.Name) { + input.NewName = plan.Name.ValueStringPointer() + } + + if !plan.Namespace.Equal(state.Namespace) { + input.NewNamespaceName = plan.Namespace.ValueStringPointer() + } + + _, err := conn.RenameTable(ctx, &input) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionUpdating, ResNameTable, state.Name.String(), err), + err.Error(), + ) + } + + table, err := findTable(ctx, conn, plan.TableBucketARN.ValueString(), plan.Namespace.ValueString(), plan.Name.ValueString()) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, ResNameTable, plan.Name.String(), err), + err.Error(), + ) + return + } + + resp.Diagnostics.Append(flex.Flatten(ctx, table, &plan, flex.WithFieldNamePrefix("Table"))...) + if resp.Diagnostics.HasError() { + return + } + plan.Namespace = types.StringValue(table.Namespace[0]) + } + + resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) +} + +func (r *resourceTable) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + conn := r.Meta().S3TablesClient(ctx) + + var state resourceTableModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
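+	// Deletion is treated as idempotent: a NotFoundException from DeleteTable is
+	// ignored below so that destroying an already-removed table succeeds.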
+ if resp.Diagnostics.HasError() { + return + } + + input := s3tables.DeleteTableInput{ + Name: state.Name.ValueStringPointer(), + Namespace: state.Namespace.ValueStringPointer(), + TableBucketARN: state.TableBucketARN.ValueStringPointer(), + } + + _, err := conn.DeleteTable(ctx, &input) + if err != nil { + if errs.IsA[*awstypes.NotFoundException](err) { + return + } + + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionDeleting, ResNameTable, state.Name.String(), err), + err.Error(), + ) + return + } +} + +func (r *resourceTable) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + identifier, err := parseTableIdentifier(req.ID) + if err != nil { + resp.Diagnostics.AddError( + "Invalid Import ID", + "Import IDs for S3 Tables Tables must use the format
"+namespaceIDSeparator+""+namespaceIDSeparator+"
.\n"+ + fmt.Sprintf("Had %q", req.ID), + ) + return + } + + var state resourceTableModel + identifier.Populate(&state) + + diags := resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +func findTable(ctx context.Context, conn *s3tables.Client, bucketARN, namespace, name string) (*s3tables.GetTableOutput, error) { + in := &s3tables.GetTableInput{ + Name: aws.String(name), + Namespace: aws.String(namespace), + TableBucketARN: aws.String(bucketARN), + } + + out, err := conn.GetTable(ctx, in) + if err != nil { + if errs.IsA[*awstypes.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: in, + } + } + + return nil, err + } + + if out == nil { + return nil, tfresource.NewEmptyResultError(in) + } + + return out, nil +} + +type resourceTableModel struct { + ARN types.String `tfsdk:"arn"` + CreatedAt timetypes.RFC3339 `tfsdk:"created_at"` + CreatedBy types.String `tfsdk:"created_by"` + Format fwtypes.StringEnum[awstypes.OpenTableFormat] `tfsdk:"format"` + MetadataLocation types.String `tfsdk:"metadata_location"` + ModifiedAt timetypes.RFC3339 `tfsdk:"modified_at"` + ModifiedBy types.String `tfsdk:"modified_by"` + Name types.String `tfsdk:"name"` + Namespace types.String `tfsdk:"namespace" autoflex:"-"` + OwnerAccountID types.String `tfsdk:"owner_account_id"` + TableBucketARN fwtypes.ARN `tfsdk:"table_bucket_arn"` + Type fwtypes.StringEnum[awstypes.TableType] `tfsdk:"type"` + VersionToken types.String `tfsdk:"version_token"` + WarehouseLocation types.String `tfsdk:"warehouse_location"` +} + +func tableIDFromTableARN(s string) (string, error) { + arn, err := arn.Parse(s) + if err != nil { + return "", err + } + + return tableIDFromTableARNResource(arn.Resource), nil +} + +func tableIDFromTableARNResource(s string) string { + parts := strings.SplitN(s, "/", 4) + return parts[3] +} + +type tableIdentifier struct { + TableBucketARN string + Namespace string + Name string +} + +const ( + tableIDSeparator = ";" + tableIDParts = 3 +) + +func parseTableIdentifier(s string) (tableIdentifier, error) { + parts := strings.Split(s, tableIDSeparator) + if len(parts) != tableIDParts { + return tableIdentifier{}, errors.New("not enough parts") + } + for i := range tableIDParts { + if parts[i] == "" { + return tableIdentifier{}, errors.New("empty part") + } + } + + return tableIdentifier{ + TableBucketARN: parts[0], + Namespace: parts[1], + Name: parts[2], + }, nil +} + +func (id tableIdentifier) String() string { + return id.TableBucketARN + tableIDSeparator + + id.Namespace + tableIDSeparator + + id.Name +} + +func (id tableIdentifier) Populate(m *resourceTableModel) { + m.TableBucketARN = fwtypes.ARNValue(id.TableBucketARN) + m.Namespace = types.StringValue(id.Namespace) + m.Name = types.StringValue(id.Name) +} diff --git a/internal/service/s3tables/table_bucket_test.go b/internal/service/s3tables/table_bucket_test.go index b44319222305..8609953feacf 100644 --- a/internal/service/s3tables/table_bucket_test.go +++ b/internal/service/s3tables/table_bucket_test.go @@ -41,10 +41,10 @@ func TestAccS3TablesTableBucket_basic(t *testing.T) { Config: testAccTableBucketConfig_basic(rName), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckTableBucketExists(ctx, resourceName, &tablebucket), - acctest.CheckResourceAttrRegionalARN(resourceName, names.AttrARN, "s3tables", "bucket/"+rName), + acctest.CheckResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "s3tables", "bucket/"+rName), 
resource.TestCheckResourceAttrSet(resourceName, names.AttrCreatedAt), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), - acctest.CheckResourceAttrAccountID(resourceName, names.AttrOwnerAccountID), + acctest.CheckResourceAttrAccountID(ctx, resourceName, names.AttrOwnerAccountID), ), }, { diff --git a/internal/service/s3tables/table_test.go b/internal/service/s3tables/table_test.go new file mode 100644 index 000000000000..8f72ef89b68f --- /dev/null +++ b/internal/service/s3tables/table_test.go @@ -0,0 +1,461 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package s3tables_test + +import ( + "context" + "errors" + "fmt" + "strings" + "testing" + + "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3tables" + awstypes "github.com/aws/aws-sdk-go-v2/service/s3tables/types" + "github.com/hashicorp/terraform-plugin-testing/compare" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + tfs3tables "github.com/hashicorp/terraform-provider-aws/internal/service/s3tables" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/internal/verify" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccS3TablesTable_basic(t *testing.T) { + ctx := acctest.Context(t) + + var table s3tables.GetTableOutput + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + namespace := strings.ReplaceAll(sdkacctest.RandomWithPrefix(acctest.ResourcePrefix), "-", "_") + rName := strings.ReplaceAll(sdkacctest.RandomWithPrefix(acctest.ResourcePrefix), "-", "_") + resourceName := "aws_s3tables_table.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.S3TablesServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTableDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTableConfig_basic(rName, namespace, bucketName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTableExists(ctx, resourceName, &table), + acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "s3tables", regexache.MustCompile("bucket/"+bucketName+"/table/"+verify.UUIDRegexPattern+"$")), + resource.TestCheckResourceAttrSet(resourceName, names.AttrCreatedAt), + acctest.CheckResourceAttrAccountID(ctx, resourceName, "created_by"), + resource.TestCheckResourceAttr(resourceName, names.AttrFormat, "ICEBERG"), + resource.TestCheckNoResourceAttr(resourceName, "metadata_location"), + resource.TestCheckResourceAttrPair(resourceName, "modified_at", resourceName, names.AttrCreatedAt), + resource.TestCheckNoResourceAttr(resourceName, "modified_by"), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttrPair(resourceName, names.AttrNamespace, 
"aws_s3tables_namespace.test", "namespace.0"), + acctest.CheckResourceAttrAccountID(ctx, resourceName, names.AttrOwnerAccountID), + resource.TestCheckResourceAttrPair(resourceName, "table_bucket_arn", "aws_s3tables_table_bucket.test", names.AttrARN), + resource.TestCheckResourceAttr(resourceName, names.AttrType, string(awstypes.TableTypeCustomer)), + resource.TestCheckResourceAttrSet(resourceName, "version_token"), + func(s *terraform.State) error { + tableID, err := tfs3tables.TableIDFromTableARN(aws.ToString(table.TableARN)) + if err != nil { + return err + } + return resource.TestMatchResourceAttr(resourceName, "warehouse_location", regexache.MustCompile("^s3://"+tableID[:19]+".+--table-s3$"))(s) + }, + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccTableImportStateIdFunc(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrName, + }, + }, + }) +} + +func TestAccS3TablesTable_disappears(t *testing.T) { + ctx := acctest.Context(t) + + var table s3tables.GetTableOutput + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + namespace := strings.ReplaceAll(sdkacctest.RandomWithPrefix(acctest.ResourcePrefix), "-", "_") + rName := strings.ReplaceAll(sdkacctest.RandomWithPrefix(acctest.ResourcePrefix), "-", "_") + resourceName := "aws_s3tables_table.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.S3TablesServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTableDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTableConfig_basic(rName, namespace, bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckTableExists(ctx, resourceName, &table), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfs3tables.NewResourceTable, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccS3TablesTable_rename(t *testing.T) { + ctx := acctest.Context(t) + + var table s3tables.GetTableOutput + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + namespace := strings.ReplaceAll(sdkacctest.RandomWithPrefix(acctest.ResourcePrefix), "-", "_") + rName := strings.ReplaceAll(sdkacctest.RandomWithPrefix(acctest.ResourcePrefix), "-", "_") + rNameUpdated := strings.ReplaceAll(sdkacctest.RandomWithPrefix(acctest.ResourcePrefix), "-", "_") + resourceName := "aws_s3tables_table.test" + + createdAtNoChange := statecheck.CompareValue(compare.ValuesSame()) + createdByNoChange := statecheck.CompareValue(compare.ValuesSame()) + modifiedAtChange := statecheck.CompareValue(compare.ValuesDiffer()) + modifiedByChange := statecheck.CompareValue(compare.ValuesDiffer()) + versionNoChange := statecheck.CompareValue(compare.ValuesSame()) + warehouseLocationNoChange := statecheck.CompareValue(compare.ValuesSame()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.S3TablesServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTableDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTableConfig_basic(rName, namespace, bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckTableExists(ctx, resourceName, &table), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + ), + 
ConfigStateChecks: []statecheck.StateCheck{ + createdAtNoChange.AddStateValue(resourceName, tfjsonpath.New(names.AttrCreatedAt)), + createdByNoChange.AddStateValue(resourceName, tfjsonpath.New("created_by")), + modifiedAtChange.AddStateValue(resourceName, tfjsonpath.New("modified_at")), + modifiedByChange.AddStateValue(resourceName, tfjsonpath.New("modified_by")), + versionNoChange.AddStateValue(resourceName, tfjsonpath.New("version_token")), + warehouseLocationNoChange.AddStateValue(resourceName, tfjsonpath.New("warehouse_location")), + }, + }, + { + Config: testAccTableConfig_basic(rNameUpdated, namespace, bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckTableExists(ctx, resourceName, &table), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rNameUpdated), + resource.TestCheckResourceAttrSet(resourceName, "modified_at"), + acctest.CheckResourceAttrAccountID(ctx, resourceName, "modified_by"), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrName), knownvalue.StringExact(rNameUpdated)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + createdAtNoChange.AddStateValue(resourceName, tfjsonpath.New(names.AttrCreatedAt)), + createdByNoChange.AddStateValue(resourceName, tfjsonpath.New("created_by")), + modifiedAtChange.AddStateValue(resourceName, tfjsonpath.New("modified_at")), + modifiedByChange.AddStateValue(resourceName, tfjsonpath.New("modified_by")), + versionNoChange.AddStateValue(resourceName, tfjsonpath.New("version_token")), + warehouseLocationNoChange.AddStateValue(resourceName, tfjsonpath.New("warehouse_location")), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccTableImportStateIdFunc(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrName, + }, + }, + }) +} + +func TestAccS3TablesTable_updateNamespace(t *testing.T) { + ctx := acctest.Context(t) + + var table s3tables.GetTableOutput + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + namespace := strings.ReplaceAll(sdkacctest.RandomWithPrefix(acctest.ResourcePrefix), "-", "_") + rName := strings.ReplaceAll(sdkacctest.RandomWithPrefix(acctest.ResourcePrefix), "-", "_") + namespaceUpdated := strings.ReplaceAll(sdkacctest.RandomWithPrefix(acctest.ResourcePrefix), "-", "_") + resourceName := "aws_s3tables_table.test" + + createdAtNoChange := statecheck.CompareValue(compare.ValuesSame()) + createdByNoChange := statecheck.CompareValue(compare.ValuesSame()) + modifiedAtChange := statecheck.CompareValue(compare.ValuesDiffer()) + modifiedByChange := statecheck.CompareValue(compare.ValuesDiffer()) + versionNoChange := statecheck.CompareValue(compare.ValuesSame()) + warehouseLocationNoChange := statecheck.CompareValue(compare.ValuesSame()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.S3TablesServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTableDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTableConfig_basic(rName, namespace, bucketName), + Check: 
resource.ComposeTestCheckFunc( + testAccCheckTableExists(ctx, resourceName, &table), + resource.TestCheckResourceAttr(resourceName, names.AttrNamespace, namespace), + ), + ConfigStateChecks: []statecheck.StateCheck{ + createdAtNoChange.AddStateValue(resourceName, tfjsonpath.New(names.AttrCreatedAt)), + createdByNoChange.AddStateValue(resourceName, tfjsonpath.New("created_by")), + modifiedAtChange.AddStateValue(resourceName, tfjsonpath.New("modified_at")), + modifiedByChange.AddStateValue(resourceName, tfjsonpath.New("modified_by")), + versionNoChange.AddStateValue(resourceName, tfjsonpath.New("version_token")), + warehouseLocationNoChange.AddStateValue(resourceName, tfjsonpath.New("warehouse_location")), + }, + }, + { + Config: testAccTableConfig_basic(rName, namespaceUpdated, bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckTableExists(ctx, resourceName, &table), + resource.TestCheckResourceAttr(resourceName, names.AttrNamespace, namespaceUpdated), + resource.TestCheckResourceAttrSet(resourceName, "modified_at"), + acctest.CheckResourceAttrAccountID(ctx, resourceName, "modified_by"), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrNamespace), knownvalue.StringExact(namespaceUpdated)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + createdAtNoChange.AddStateValue(resourceName, tfjsonpath.New(names.AttrCreatedAt)), + createdByNoChange.AddStateValue(resourceName, tfjsonpath.New("created_by")), + modifiedAtChange.AddStateValue(resourceName, tfjsonpath.New("modified_at")), + modifiedByChange.AddStateValue(resourceName, tfjsonpath.New("modified_by")), + versionNoChange.AddStateValue(resourceName, tfjsonpath.New("version_token")), + warehouseLocationNoChange.AddStateValue(resourceName, tfjsonpath.New("warehouse_location")), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccTableImportStateIdFunc(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrName, + }, + }, + }) +} + +func TestAccS3TablesTable_updateNameAndNamespace(t *testing.T) { + ctx := acctest.Context(t) + + var table s3tables.GetTableOutput + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + namespace := strings.ReplaceAll(sdkacctest.RandomWithPrefix(acctest.ResourcePrefix), "-", "_") + rName := strings.ReplaceAll(sdkacctest.RandomWithPrefix(acctest.ResourcePrefix), "-", "_") + namespaceUpdated := strings.ReplaceAll(sdkacctest.RandomWithPrefix(acctest.ResourcePrefix), "-", "_") + rNameUpdated := strings.ReplaceAll(sdkacctest.RandomWithPrefix(acctest.ResourcePrefix), "-", "_") + resourceName := "aws_s3tables_table.test" + + createdAtNoChange := statecheck.CompareValue(compare.ValuesSame()) + createdByNoChange := statecheck.CompareValue(compare.ValuesSame()) + modifiedAtChange := statecheck.CompareValue(compare.ValuesDiffer()) + modifiedByChange := statecheck.CompareValue(compare.ValuesDiffer()) + versionNoChange := statecheck.CompareValue(compare.ValuesSame()) + warehouseLocationNoChange := statecheck.CompareValue(compare.ValuesSame()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + 
ErrorCheck: acctest.ErrorCheck(t, names.S3TablesServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTableDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTableConfig_basic(rName, namespace, bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckTableExists(ctx, resourceName, &table), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, names.AttrNamespace, namespace), + ), + ConfigStateChecks: []statecheck.StateCheck{ + createdAtNoChange.AddStateValue(resourceName, tfjsonpath.New(names.AttrCreatedAt)), + createdByNoChange.AddStateValue(resourceName, tfjsonpath.New("created_by")), + modifiedAtChange.AddStateValue(resourceName, tfjsonpath.New("modified_at")), + modifiedByChange.AddStateValue(resourceName, tfjsonpath.New("modified_by")), + versionNoChange.AddStateValue(resourceName, tfjsonpath.New("version_token")), + warehouseLocationNoChange.AddStateValue(resourceName, tfjsonpath.New("warehouse_location")), + }, + }, + { + Config: testAccTableConfig_basic(rNameUpdated, namespaceUpdated, bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckTableExists(ctx, resourceName, &table), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rNameUpdated), + resource.TestCheckResourceAttr(resourceName, names.AttrNamespace, namespaceUpdated), + resource.TestCheckResourceAttrSet(resourceName, "modified_at"), + acctest.CheckResourceAttrAccountID(ctx, resourceName, "modified_by"), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrName), knownvalue.StringExact(rNameUpdated)), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrNamespace), knownvalue.StringExact(namespaceUpdated)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + createdAtNoChange.AddStateValue(resourceName, tfjsonpath.New(names.AttrCreatedAt)), + createdByNoChange.AddStateValue(resourceName, tfjsonpath.New("created_by")), + modifiedAtChange.AddStateValue(resourceName, tfjsonpath.New("modified_at")), + modifiedByChange.AddStateValue(resourceName, tfjsonpath.New("modified_by")), + versionNoChange.AddStateValue(resourceName, tfjsonpath.New("version_token")), + warehouseLocationNoChange.AddStateValue(resourceName, tfjsonpath.New("warehouse_location")), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccTableImportStateIdFunc(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrName, + }, + }, + }) +} + +func testAccCheckTableDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).S3TablesClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_s3tables_table" { + continue + } + + _, err := tfs3tables.FindTable(ctx, conn, + rs.Primary.Attributes["table_bucket_arn"], + rs.Primary.Attributes[names.AttrNamespace], + rs.Primary.Attributes[names.AttrName], + ) + if tfresource.NotFound(err) { + return nil + } + if err != nil { + return create.Error(names.S3Tables, create.ErrActionCheckingDestroyed, tfs3tables.ResNameTable, 
rs.Primary.ID, err) + } + + return create.Error(names.S3Tables, create.ErrActionCheckingDestroyed, tfs3tables.ResNameTable, rs.Primary.ID, errors.New("not destroyed")) + } + + return nil + } +} + +func testAccCheckTableExists(ctx context.Context, name string, table *s3tables.GetTableOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameTable, name, errors.New("not found")) + } + + if rs.Primary.Attributes["table_bucket_arn"] == "" || rs.Primary.Attributes[names.AttrName] == "" { + return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameTable, name, errors.New("not set")) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).S3TablesClient(ctx) + + resp, err := tfs3tables.FindTable(ctx, conn, + rs.Primary.Attributes["table_bucket_arn"], + rs.Primary.Attributes[names.AttrNamespace], + rs.Primary.Attributes[names.AttrName], + ) + if err != nil { + return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameTable, rs.Primary.ID, err) + } + + *table = *resp + + return nil + } +} + +func testAccTableImportStateIdFunc(resourceName string) resource.ImportStateIdFunc { + return func(s *terraform.State) (string, error) { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return "", fmt.Errorf("not found: %s", resourceName) + } + + identifier := tfs3tables.TableIdentifier{ + TableBucketARN: rs.Primary.Attributes["table_bucket_arn"], + Namespace: rs.Primary.Attributes[names.AttrNamespace], + Name: rs.Primary.Attributes[names.AttrName], + } + + return identifier.String(), nil + } +} + +func testAccTableConfig_basic(rName, namespace, bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3tables_table" "test" { + name = %[1]q + namespace = aws_s3tables_namespace.test.namespace[0] + table_bucket_arn = aws_s3tables_namespace.test.table_bucket_arn + format = "ICEBERG" +} + +resource "aws_s3tables_namespace" "test" { + namespace = [%[2]q] + table_bucket_arn = aws_s3tables_table_bucket.test.arn + + lifecycle { + create_before_destroy = true + } +} + +resource "aws_s3tables_table_bucket" "test" { + name = %[3]q +} +`, rName, namespace, bucketName) +} diff --git a/website/docs/r/s3tables_table.html.markdown b/website/docs/r/s3tables_table.html.markdown new file mode 100644 index 000000000000..a3db7ad3daa4 --- /dev/null +++ b/website/docs/r/s3tables_table.html.markdown @@ -0,0 +1,82 @@ +--- +subcategory: "S3 Tables" +layout: "aws" +page_title: "AWS: aws_s3tables_table" +description: |- + Terraform resource for managing an AWS S3 Tables Table. +--- + +# Resource: aws_s3tables_table + +Terraform resource for managing an AWS S3 Tables Table. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_s3tables_table" "example" { + name = "example-table" + namespace = aws_s3tables_namespace.example + table_bucket_arn = aws_s3tables_namespace.example.table_bucket_arn + format = "ICEBERG" +} + +resource "aws_s3tables_namespace" "example" { + namespace = ["example-namespace"] + table_bucket_arn = aws_s3tables_table_bucket.example.arn +} + +resource "aws_s3tables_table_bucket" "example" { + name = "example-bucket" +} +``` + +## Argument Reference + +The following arguments are required: + +* `format` - (Required) Format of the table. + Must be `ICEBERG`. +* `name` - (Required) Name of the table. + Must be between 1 and 255 characters in length. 
+ Can consist of lowercase letters, numbers, and underscores, and must begin and end with a lowercase letter or number. +* `namespace` - (Required) Name of the namespace for this table. + Must be between 1 and 255 characters in length. + Can consist of lowercase letters, numbers, and underscores, and must begin and end with a lowercase letter or number. +* `table_bucket_arn` - (Required, Forces new resource) ARN referencing the Table Bucket that contains this Namespace. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the table. +* `created_at` - Date and time when the namespace was created. +* `created_by` - Account ID of the account that created the namespace. +* `metadata_location` - Location of table metadata. +* `modified_at` - Date and time when the namespace was last modified. +* `modified_by` - Account ID of the account that last modified the namespace. +* `owner_account_id` - Account ID of the account that owns the namespace. +* `type` - Type of the table. + One of `customer` or `aws`. +* `version_token` - Identifier for the current version of table data. +* `warehouse_location` - S3 URI pointing to the S3 Bucket that contains the table data. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 Tables Table using the `table_bucket_arn`, the value of `namespace`, and the value of `name`, separated by a semicolon (`;`). +For example: + +```terraform +import { + to = aws_s3tables_table.example + id = "arn:aws:s3tables:us-west-2:123456789012:bucket/example-bucket;example-namespace;example-table" +} +``` + +Using `terraform import`, import S3 Tables Table using the `example_id_arg`. +For example: + +```console +% terraform import aws_s3tables_table.example 'arn:aws:s3tables:us-west-2:123456789012:bucket/example-bucket;example-namespace;example-table' +``` From 5253a1a413f9bcbe48589fd53e95e198c6690f0a Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 20 Nov 2024 11:07:06 -0800 Subject: [PATCH 12/35] Documentation tweaks --- website/docs/r/s3tables_namespace.html.markdown | 6 ++++-- website/docs/r/s3tables_table_bucket.html.markdown | 6 ++++-- website/docs/r/s3tables_table_bucket_policy.html.markdown | 6 ++++-- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/website/docs/r/s3tables_namespace.html.markdown b/website/docs/r/s3tables_namespace.html.markdown index 9919b941bc8d..48b3193bdbb0 100644 --- a/website/docs/r/s3tables_namespace.html.markdown +++ b/website/docs/r/s3tables_namespace.html.markdown @@ -45,7 +45,8 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 Tables Namespace using the `table_bucket_arn` and the value of `namespace`, separated by a semicolon (`;`). For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 Tables Namespace using the `table_bucket_arn` and the value of `namespace`, separated by a semicolon (`;`). +For example: ```terraform import { @@ -54,7 +55,8 @@ import { } ``` -Using `terraform import`, import S3 Tables Namespace using the `table_bucket_arn` and the value of `namespace`, separated by a semicolon (`;`). 
For example: +Using `terraform import`, import S3 Tables Namespace using the `table_bucket_arn` and the value of `namespace`, separated by a semicolon (`;`). +For example: ```console % terraform import aws_s3tables_namespace.example 'arn:aws:s3tables:us-west-2:123456789012:bucket/example-bucket;example-namespace' diff --git a/website/docs/r/s3tables_table_bucket.html.markdown b/website/docs/r/s3tables_table_bucket.html.markdown index 09a22120a9af..8e4a6cd2050f 100644 --- a/website/docs/r/s3tables_table_bucket.html.markdown +++ b/website/docs/r/s3tables_table_bucket.html.markdown @@ -39,7 +39,8 @@ This resource exports the following attributes in addition to the argument above ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 Tables Table Bucket using the `arn`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 Tables Table Bucket using the `arn`. +For example: ```terraform import { @@ -48,7 +49,8 @@ import { } ``` -Using `terraform import`, import S3 Tables Table Bucket using the `arn`. For example: +Using `terraform import`, import S3 Tables Table Bucket using the `arn`. +For example: ```console % terraform import aws_s3tables_table_bucket.example arn:aws:s3tables:us-west-2:123456789012:bucket/example-bucket diff --git a/website/docs/r/s3tables_table_bucket_policy.html.markdown b/website/docs/r/s3tables_table_bucket_policy.html.markdown index ded16afbddd4..a0652701ade6 100644 --- a/website/docs/r/s3tables_table_bucket_policy.html.markdown +++ b/website/docs/r/s3tables_table_bucket_policy.html.markdown @@ -40,7 +40,8 @@ The following arguments are required: ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 Tables Table Bucket Policy using the `table_bucket_arn`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 Tables Table Bucket Policy using the `table_bucket_arn`. +For example: ```terraform import { @@ -49,7 +50,8 @@ import { } ``` -Using `terraform import`, import S3 Tables Table Bucket Policy using the `table_bucket_arn`. For example: +Using `terraform import`, import S3 Tables Table Bucket Policy using the `table_bucket_arn`. 
+For example: ```console % terraform import aws_s3tables_table_bucket_policy.example 'arn:aws:s3tables:us-west-2:123456789012:bucket/example-bucket;example-namespace' From 5dc279988dda4f76704fc83869d1efe4b55dd093 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 20 Nov 2024 11:07:38 -0800 Subject: [PATCH 13/35] Adds sweepers for `aws_s3tables_namespace` and `aws_s3tables_table` --- internal/service/s3tables/sweep.go | 94 +++++++++++++++++++++++++++++- 1 file changed, 93 insertions(+), 1 deletion(-) diff --git a/internal/service/s3tables/sweep.go b/internal/service/s3tables/sweep.go index b0a54ece30a8..d0caae2c1651 100644 --- a/internal/service/s3tables/sweep.go +++ b/internal/service/s3tables/sweep.go @@ -16,7 +16,99 @@ import ( ) func RegisterSweepers() { - awsv2.Register("aws_s3tables_table_bucket", sweepTableBuckets) + awsv2.Register("aws_s3tables_namespace", sweepNamespaces, + "aws_s3tables_table", + ) + + awsv2.Register("aws_s3tables_table", sweepTables) + + awsv2.Register("aws_s3tables_table_bucket", sweepTableBuckets, + "aws_s3tables_namespace", + ) +} + +func sweepNamespaces(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { + conn := client.S3TablesClient(ctx) + + var sweepResources []sweep.Sweepable + + tableBuckets := s3tables.NewListTableBucketsPaginator(conn, &s3tables.ListTableBucketsInput{}) + for tableBuckets.HasMorePages() { + page, err := tableBuckets.NextPage(ctx) + if err != nil { + return nil, err + } + + for _, bucket := range page.TableBuckets { + namespaces := s3tables.NewListNamespacesPaginator(conn, &s3tables.ListNamespacesInput{ + TableBucketARN: bucket.Arn, + }) + for namespaces.HasMorePages() { + page, err := namespaces.NextPage(ctx) + if err != nil { + return nil, err + } + + for _, namespace := range page.Namespaces { + sweepResources = append(sweepResources, framework.NewSweepResource(newResourceNamespace, client, + framework.NewAttribute("table_bucket_arn", aws.ToString(bucket.Arn)), + framework.NewAttribute(names.AttrNamespace, namespace.Namespace), + )) + } + } + } + } + + return sweepResources, nil +} + +func sweepTables(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { + conn := client.S3TablesClient(ctx) + + var sweepResources []sweep.Sweepable + + tableBuckets := s3tables.NewListTableBucketsPaginator(conn, &s3tables.ListTableBucketsInput{}) + for tableBuckets.HasMorePages() { + page, err := tableBuckets.NextPage(ctx) + if err != nil { + return nil, err + } + + for _, bucket := range page.TableBuckets { + namespaces := s3tables.NewListNamespacesPaginator(conn, &s3tables.ListNamespacesInput{ + TableBucketARN: bucket.Arn, + }) + for namespaces.HasMorePages() { + page, err := namespaces.NextPage(ctx) + if err != nil { + return nil, err + } + + for _, namespace := range page.Namespaces { + tables := s3tables.NewListTablesPaginator(conn, &s3tables.ListTablesInput{ + TableBucketARN: bucket.Arn, + Namespace: aws.String(namespace.Namespace[0]), + }) + for tables.HasMorePages() { + page, err := tables.NextPage(ctx) + if err != nil { + return nil, err + } + + for _, table := range page.Tables { + sweepResources = append(sweepResources, framework.NewSweepResource(newResourceTable, client, + framework.NewAttribute("table_bucket_arn", aws.ToString(bucket.Arn)), + framework.NewAttribute(names.AttrNamespace, namespace.Namespace[0]), + framework.NewAttribute(names.AttrName, aws.ToString(table.Name)), + )) + } + } + } + } + } + } + + return sweepResources, nil } func sweepTableBuckets(ctx context.Context, client 
*conns.AWSClient) ([]sweep.Sweepable, error) { From e9b03dd8b98fb7c3fd8496d9f4afbb51fe80d420 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 20 Nov 2024 11:51:23 -0800 Subject: [PATCH 14/35] Consolidates name validation --- internal/service/s3tables/namespace.go | 12 +++++++---- internal/service/s3tables/table.go | 26 ++++++++++------------- internal/service/s3tables/table_bucket.go | 6 +++--- internal/service/s3tables/validate.go | 17 +++++++++++++++ 4 files changed, 39 insertions(+), 22 deletions(-) create mode 100644 internal/service/s3tables/validate.go diff --git a/internal/service/s3tables/namespace.go b/internal/service/s3tables/namespace.go index dd5e75edde76..f07479dc33d4 100644 --- a/internal/service/s3tables/namespace.go +++ b/internal/service/s3tables/namespace.go @@ -9,7 +9,6 @@ import ( "fmt" "strings" - "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/s3tables" awstypes "github.com/aws/aws-sdk-go-v2/service/s3tables/types" @@ -79,9 +78,7 @@ func (r *resourceNamespace) Schema(ctx context.Context, req resource.SchemaReque listvalidator.SizeAtLeast(1), listvalidator.SizeAtMost(1), listvalidator.ValueStringsAre( - stringvalidator.LengthBetween(1, 255), - stringvalidator.RegexMatches(regexache.MustCompile(`^[0-9a-z_]*$`), "must contain only lowercase letters, numbers, or underscores"), - stringvalidator.RegexMatches(regexache.MustCompile(`^[0-9a-z].*[0-9a-z]$`), "must start and end with a letter or number"), + namespaceNameValidator..., ), }, }, @@ -272,3 +269,10 @@ type resourceNamespaceModel struct { OwnerAccountID types.String `tfsdk:"owner_account_id"` TableBucketARN fwtypes.ARN `tfsdk:"table_bucket_arn"` } + +var namespaceNameValidator = []validator.String{ + stringvalidator.LengthBetween(1, 255), + stringMustContainLowerCaseLettersNumbersUnderscores, + stringMustStartWithLetterOrNumber, + stringMustEndWithLetterOrNumber, +} diff --git a/internal/service/s3tables/table.go b/internal/service/s3tables/table.go index 092fa2ac6cc9..2a97a691bd00 100644 --- a/internal/service/s3tables/table.go +++ b/internal/service/s3tables/table.go @@ -9,7 +9,6 @@ import ( "fmt" "strings" - "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/s3tables" @@ -85,22 +84,12 @@ func (r *resourceTable) Schema(ctx context.Context, req resource.SchemaRequest, Computed: true, }, names.AttrName: schema.StringAttribute{ - Required: true, - Validators: []validator.String{ - stringvalidator.LengthBetween(1, 255), - stringvalidator.RegexMatches(regexache.MustCompile(`^[0-9a-z_]+$`), "must contain only lowercase letters, numbers, or underscores"), - stringvalidator.RegexMatches(regexache.MustCompile(`^[0-9a-z]`), "must start with a letter or number"), - stringvalidator.RegexMatches(regexache.MustCompile(`[0-9a-z]$`), "must end with a letter or number"), - }, + Required: true, + Validators: tableNameValidator, }, names.AttrNamespace: schema.StringAttribute{ - Required: true, - Validators: []validator.String{ - stringvalidator.LengthBetween(1, 255), - stringvalidator.RegexMatches(regexache.MustCompile(`^[0-9a-z_]+$`), "must contain only lowercase letters, numbers, or underscores"), - stringvalidator.RegexMatches(regexache.MustCompile(`^[0-9a-z]`), "must start with a letter or number"), - stringvalidator.RegexMatches(regexache.MustCompile(`[0-9a-z]$`), "must end with a letter or number"), - }, + Required: true, + Validators: namespaceNameValidator, }, 
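+			// `name` and `namespace` above share the same naming rules (1-255
+			// characters; lowercase letters, numbers, and underscores; starting and
+			// ending with a letter or number). The regex validators they reuse are
+			// consolidated in validate.go.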
names.AttrOwnerAccountID: schema.StringAttribute{ Computed: true, @@ -411,3 +400,10 @@ func (id tableIdentifier) Populate(m *resourceTableModel) { m.Namespace = types.StringValue(id.Namespace) m.Name = types.StringValue(id.Name) } + +var tableNameValidator = []validator.String{ + stringvalidator.LengthBetween(1, 255), + stringMustContainLowerCaseLettersNumbersUnderscores, + stringMustStartWithLetterOrNumber, + stringMustEndWithLetterOrNumber, +} diff --git a/internal/service/s3tables/table_bucket.go b/internal/service/s3tables/table_bucket.go index 1c38b672da13..ab8e6f67c6d0 100644 --- a/internal/service/s3tables/table_bucket.go +++ b/internal/service/s3tables/table_bucket.go @@ -7,7 +7,6 @@ import ( "context" "errors" - "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/s3tables" awstypes "github.com/aws/aws-sdk-go-v2/service/s3tables/types" @@ -66,8 +65,9 @@ func (r *resourceTableBucket) Schema(ctx context.Context, req resource.SchemaReq }, Validators: []validator.String{ stringvalidator.LengthBetween(3, 63), - stringvalidator.RegexMatches(regexache.MustCompile(`^[0-9a-z-]*$`), "must contain only lowercase letters, numbers, or hyphens"), - stringvalidator.RegexMatches(regexache.MustCompile(`^[0-9a-z].*[0-9a-z]$`), "must start and end with a letter or number"), + stringMustContainLowerCaseLettersNumbersHypens, + stringMustStartWithLetterOrNumber, + stringMustEndWithLetterOrNumber, validators.PrefixNoneOf( "xn--", "sthree-", diff --git a/internal/service/s3tables/validate.go b/internal/service/s3tables/validate.go new file mode 100644 index 000000000000..be3a4af9db0d --- /dev/null +++ b/internal/service/s3tables/validate.go @@ -0,0 +1,17 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package s3tables + +import ( + "github.com/YakDriver/regexache" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" +) + +var ( + stringMustContainLowerCaseLettersNumbersHypens = stringvalidator.RegexMatches(regexache.MustCompile(`^[0-9a-z-]+$`), "must contain only lowercase letters, numbers, or hyphens") + stringMustContainLowerCaseLettersNumbersUnderscores = stringvalidator.RegexMatches(regexache.MustCompile(`^[0-9a-z_]+$`), "must contain only lowercase letters, numbers, or underscores") + + stringMustStartWithLetterOrNumber = stringvalidator.RegexMatches(regexache.MustCompile(`^[0-9a-z]`), "must start with a letter or number") + stringMustEndWithLetterOrNumber = stringvalidator.RegexMatches(regexache.MustCompile(`[0-9a-z]$`), "must end with a letter or number") +) From 7ca5abbdf19dcf1c17eb095af36e1405c7b463b4 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Thu, 21 Nov 2024 10:50:35 -0800 Subject: [PATCH 15/35] Adds knownvalue check that takes any string-like value --- internal/acctest/knownvalue/stringable_value.go | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 internal/acctest/knownvalue/stringable_value.go diff --git a/internal/acctest/knownvalue/stringable_value.go b/internal/acctest/knownvalue/stringable_value.go new file mode 100644 index 000000000000..53d828d0c13e --- /dev/null +++ b/internal/acctest/knownvalue/stringable_value.go @@ -0,0 +1,10 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package statecheck + +import "github.com/hashicorp/terraform-plugin-testing/knownvalue" + +func StringExact[T ~string](value T) knownvalue.Check { + return knownvalue.StringExact(string(value)) +} From 35a2e593510ea546ea7bca30c9c00c2786ba6aa0 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Fri, 22 Nov 2024 11:33:39 -0800 Subject: [PATCH 16/35] Adds `maintenance_configuration` to `aws_s3tables_table_bucket` --- internal/service/s3tables/table_bucket.go | 257 +++++++++++++++++- .../service/s3tables/table_bucket_test.go | 126 +++++++++ .../r/s3tables_table_bucket.html.markdown | 30 ++ 3 files changed, 407 insertions(+), 6 deletions(-) diff --git a/internal/service/s3tables/table_bucket.go b/internal/service/s3tables/table_bucket.go index ab8e6f67c6d0..0d167b14b07c 100644 --- a/internal/service/s3tables/table_bucket.go +++ b/internal/service/s3tables/table_bucket.go @@ -12,6 +12,7 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/s3tables/types" "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" @@ -19,11 +20,13 @@ import ( "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/framework" "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" "github.com/hashicorp/terraform-provider-aws/internal/framework/validators" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -40,7 +43,6 @@ const ( type resourceTableBucket struct { framework.ResourceWithConfigure - framework.WithNoUpdate } func (r *resourceTableBucket) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { @@ -58,6 +60,15 @@ func (r *resourceTableBucket) Schema(ctx context.Context, req resource.SchemaReq stringplanmodifier.UseStateForUnknown(), }, }, + // TODO: Once Protocol v6 is supported, convert this to a `schema.SingleNestedAttribute` with full schema information + // Validations needed: + // * iceberg_unreferenced_file_removal.settings.non_current_days: int32validator.AtLeast(1) + // * iceberg_unreferenced_file_removal.settings.unreferenced_days: int32validator.AtLeast(1) + "maintenance_configuration": schema.ObjectAttribute{ + CustomType: fwtypes.NewObjectTypeOf[tableBucketMaintenanceConfigurationModel](ctx), + Optional: true, + Computed: true, + }, names.AttrName: schema.StringAttribute{ Required: true, PlanModifiers: []planmodifier.String{ @@ -84,6 +95,9 @@ func (r *resourceTableBucket) Schema(ctx context.Context, req resource.SchemaReq }, names.AttrOwnerAccountID: schema.StringAttribute{ Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, }, }, } @@ -120,6 +134,38 
@@ func (r *resourceTableBucket) Create(ctx context.Context, req resource.CreateReq return } + if !plan.MaintenanceConfiguration.IsUnknown() && !plan.MaintenanceConfiguration.IsNull() { + mc, d := plan.MaintenanceConfiguration.ToPtr(ctx) + resp.Diagnostics.Append(d...) + if resp.Diagnostics.HasError() { + return + } + + if !mc.IcebergUnreferencedFileRemovalSettings.IsNull() { + input := s3tables.PutTableBucketMaintenanceConfigurationInput{ + TableBucketARN: out.Arn, + Type: awstypes.TableBucketMaintenanceTypeIcebergUnreferencedFileRemoval, + } + + value, d := expandTableBucketMaintenanceConfigurationValue(ctx, mc.IcebergUnreferencedFileRemovalSettings) + resp.Diagnostics.Append(d...) + if resp.Diagnostics.HasError() { + return + } + + input.Value = &value + + _, err := conn.PutTableBucketMaintenanceConfiguration(ctx, &input) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, resNameTableBucket, plan.Name.String(), err), + err.Error(), + ) + return + } + } + } + bucket, err := findTableBucket(ctx, conn, aws.ToString(out.Arn)) if err != nil { resp.Diagnostics.AddError( @@ -133,6 +179,22 @@ func (r *resourceTableBucket) Create(ctx context.Context, req resource.CreateReq return } + awsMaintenanceConfig, err := conn.GetTableBucketMaintenanceConfiguration(ctx, &s3tables.GetTableBucketMaintenanceConfigurationInput{ + TableBucketARN: bucket.Arn, + }) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, resNameTableBucket, plan.Name.String(), err), + err.Error(), + ) + } + maintenanceConfiguration, d := flattenTableBucketMaintenanceConfiguration(ctx, awsMaintenanceConfig) + resp.Diagnostics.Append(d...) + if resp.Diagnostics.HasError() { + return + } + plan.MaintenanceConfiguration = maintenanceConfiguration + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) } @@ -152,7 +214,7 @@ func (r *resourceTableBucket) Read(ctx context.Context, req resource.ReadRequest } if err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionSetting, resNameTableBucket, state.Name.String(), err), + create.ProblemStandardMessage(names.S3Tables, create.ErrActionReading, resNameTableBucket, state.Name.String(), err), err.Error(), ) return @@ -163,9 +225,89 @@ func (r *resourceTableBucket) Read(ctx context.Context, req resource.ReadRequest return } + awsMaintenanceConfig, err := conn.GetTableBucketMaintenanceConfiguration(ctx, &s3tables.GetTableBucketMaintenanceConfigurationInput{ + TableBucketARN: out.Arn, + }) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, resNameTableBucket, state.Name.String(), err), + err.Error(), + ) + } + maintenanceConfiguration, d := flattenTableBucketMaintenanceConfiguration(ctx, awsMaintenanceConfig) + resp.Diagnostics.Append(d...) + if resp.Diagnostics.HasError() { + return + } + state.MaintenanceConfiguration = maintenanceConfiguration + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) } +func (r *resourceTableBucket) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + var state, plan resourceTableBucketModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
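+	// maintenance_configuration is the only argument handled by Update; when the planned value
+	// differs from state it is reconciled below with PutTableBucketMaintenanceConfiguration.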
+ if resp.Diagnostics.HasError() { + return + } + + if !state.MaintenanceConfiguration.Equal(plan.MaintenanceConfiguration) { + conn := r.Meta().S3TablesClient(ctx) + + mc, d := plan.MaintenanceConfiguration.ToPtr(ctx) + resp.Diagnostics.Append(d...) + if resp.Diagnostics.HasError() { + return + } + + if !mc.IcebergUnreferencedFileRemovalSettings.IsNull() { + input := s3tables.PutTableBucketMaintenanceConfigurationInput{ + TableBucketARN: state.ARN.ValueStringPointer(), + Type: awstypes.TableBucketMaintenanceTypeIcebergUnreferencedFileRemoval, + } + + value, d := expandTableBucketMaintenanceConfigurationValue(ctx, mc.IcebergUnreferencedFileRemovalSettings) + resp.Diagnostics.Append(d...) + if resp.Diagnostics.HasError() { + return + } + + input.Value = &value + + _, err := conn.PutTableBucketMaintenanceConfiguration(ctx, &input) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, resNameTableBucket, plan.Name.String(), err), + err.Error(), + ) + return + } + } + + awsMaintenanceConfig, err := conn.GetTableBucketMaintenanceConfiguration(ctx, &s3tables.GetTableBucketMaintenanceConfigurationInput{ + TableBucketARN: state.ARN.ValueStringPointer(), + }) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, resNameTableBucket, plan.Name.String(), err), + err.Error(), + ) + } + maintenanceConfiguration, d := flattenTableBucketMaintenanceConfiguration(ctx, awsMaintenanceConfig) + resp.Diagnostics.Append(d...) + if resp.Diagnostics.HasError() { + return + } + plan.MaintenanceConfiguration = maintenanceConfiguration + } + + resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) +} + func (r *resourceTableBucket) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { conn := r.Meta().S3TablesClient(ctx) @@ -221,8 +363,111 @@ func findTableBucket(ctx context.Context, conn *s3tables.Client, arn string) (*s } type resourceTableBucketModel struct { - ARN types.String `tfsdk:"arn"` - CreatedAt timetypes.RFC3339 `tfsdk:"created_at"` - Name types.String `tfsdk:"name"` - OwnerAccountID types.String `tfsdk:"owner_account_id"` + ARN types.String `tfsdk:"arn"` + CreatedAt timetypes.RFC3339 `tfsdk:"created_at"` + MaintenanceConfiguration fwtypes.ObjectValueOf[tableBucketMaintenanceConfigurationModel] `tfsdk:"maintenance_configuration" autoflex:"-"` + Name types.String `tfsdk:"name"` + OwnerAccountID types.String `tfsdk:"owner_account_id"` +} + +type tableBucketMaintenanceConfigurationModel struct { + IcebergUnreferencedFileRemovalSettings fwtypes.ObjectValueOf[tableBucketMaintenanceConfigurationValueModel] `tfsdk:"iceberg_unreferenced_file_removal"` +} + +type tableBucketMaintenanceConfigurationValueModel struct { + Settings fwtypes.ObjectValueOf[icebergUnreferencedFileRemovalSettingsModel] `tfsdk:"settings"` + Status fwtypes.StringEnum[awstypes.MaintenanceStatus] `tfsdk:"status"` +} + +type icebergUnreferencedFileRemovalSettingsModel struct { + NonCurrentDays types.Int32 `tfsdk:"non_current_days"` + UnreferencedDays types.Int32 `tfsdk:"unreferenced_days"` +} + +func flattenTableBucketMaintenanceConfiguration(ctx context.Context, in *s3tables.GetTableBucketMaintenanceConfigurationOutput) (result fwtypes.ObjectValueOf[tableBucketMaintenanceConfigurationModel], diags diag.Diagnostics) { + icebergConfig := in.Configuration[string(awstypes.TableBucketMaintenanceTypeIcebergUnreferencedFileRemoval)] + + valueModel, d := 
flattenTableBucketMaintenanceConfigurationValue(ctx, &icebergConfig) + diags.Append(d...) + if diags.HasError() { + return result, diags + } + + model := tableBucketMaintenanceConfigurationModel{ + IcebergUnreferencedFileRemovalSettings: valueModel, + } + + result, d = fwtypes.NewObjectValueOf(ctx, &model) + diags.Append(d...) + return result, diags +} + +func expandTableBucketMaintenanceConfigurationValue(ctx context.Context, in fwtypes.ObjectValueOf[tableBucketMaintenanceConfigurationValueModel]) (result awstypes.TableBucketMaintenanceConfigurationValue, diags diag.Diagnostics) { + model, d := in.ToPtr(ctx) + diags.Append(d...) + if diags.HasError() { + return result, diags + } + + settings, d := expandIcebergUnreferencedFileRemovalSettings(ctx, model.Settings) + diags.Append(d...) + if diags.HasError() { + return result, diags + } + + result.Settings = settings + result.Status = model.Status.ValueEnum() + + return result, diags +} + +func flattenTableBucketMaintenanceConfigurationValue(ctx context.Context, in *awstypes.TableBucketMaintenanceConfigurationValue) (result fwtypes.ObjectValueOf[tableBucketMaintenanceConfigurationValueModel], diags diag.Diagnostics) { + iceberg, d := flattenIcebergUnreferencedFileRemovalSettings(ctx, in.Settings) + diags.Append(d...) + if diags.HasError() { + return result, diags + } + + model := tableBucketMaintenanceConfigurationValueModel{ + Settings: iceberg, + Status: fwtypes.StringEnumValue(in.Status), + } + + result, d = fwtypes.NewObjectValueOf(ctx, &model) + diags.Append(d...) + return result, diags +} + +func expandIcebergUnreferencedFileRemovalSettings(ctx context.Context, in fwtypes.ObjectValueOf[icebergUnreferencedFileRemovalSettingsModel]) (result *awstypes.TableBucketMaintenanceSettingsMemberIcebergUnreferencedFileRemoval, diags diag.Diagnostics) { + model, d := in.ToPtr(ctx) + diags.Append(d...) + if diags.HasError() { + return result, diags + } + + var value awstypes.IcebergUnreferencedFileRemovalSettings + + diags.Append(flex.Expand(ctx, model, &value)...) + + return &awstypes.TableBucketMaintenanceSettingsMemberIcebergUnreferencedFileRemoval{ + Value: value, + }, diags +} + +func flattenIcebergUnreferencedFileRemovalSettings(ctx context.Context, in awstypes.TableBucketMaintenanceSettings) (result fwtypes.ObjectValueOf[icebergUnreferencedFileRemovalSettingsModel], diags diag.Diagnostics) { + switch t := in.(type) { + case *awstypes.TableBucketMaintenanceSettingsMemberIcebergUnreferencedFileRemoval: + var model icebergUnreferencedFileRemovalSettingsModel + diags.Append(flex.Flatten(ctx, t.Value, &model)...) 
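+		// flex.Flatten copied the AWS settings fields into the framework model above; wrap the
+		// populated model in a typed object value for the nested `settings` attribute.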
+ result = fwtypes.NewObjectValueOfMust(ctx, &model) + + case *awstypes.UnknownUnionMember: + tflog.Warn(ctx, "Unexpected tagged union member", map[string]any{ + "tag": t.Tag, + }) + + default: + tflog.Warn(ctx, "Unexpected nil tagged union value") + } + return result, diags } diff --git a/internal/service/s3tables/table_bucket_test.go b/internal/service/s3tables/table_bucket_test.go index 8609953feacf..7fc6eb4dc851 100644 --- a/internal/service/s3tables/table_bucket_test.go +++ b/internal/service/s3tables/table_bucket_test.go @@ -10,10 +10,15 @@ import ( "testing" "github.com/aws/aws-sdk-go-v2/service/s3tables" + awstypes "github.com/aws/aws-sdk-go-v2/service/s3tables/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" tfs3tables "github.com/hashicorp/terraform-provider-aws/internal/service/s3tables" @@ -46,6 +51,17 @@ func TestAccS3TablesTableBucket_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), acctest.CheckResourceAttrAccountID(ctx, resourceName, names.AttrOwnerAccountID), ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("maintenance_configuration"), knownvalue.ObjectExact(map[string]knownvalue.Check{ + "iceberg_unreferenced_file_removal": knownvalue.ObjectExact(map[string]knownvalue.Check{ + "settings": knownvalue.ObjectExact(map[string]knownvalue.Check{ + "non_current_days": knownvalue.Int32Exact(10), + "unreferenced_days": knownvalue.Int32Exact(3), + }), + names.AttrStatus: tfknownvalue.StringExact(awstypes.MaintenanceStatusEnabled), + }), + })), + }, }, { ResourceName: resourceName, @@ -86,6 +102,98 @@ func TestAccS3TablesTableBucket_disappears(t *testing.T) { }) } +func TestAccS3TablesTableBucket_maintenanceConfiguration(t *testing.T) { + ctx := acctest.Context(t) + + var tablebucket s3tables.GetTableBucketOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3tables_table_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.S3TablesServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTableBucketDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTableBucketConfig_maintenanceConfiguration(rName, awstypes.MaintenanceStatusEnabled, 20, 6), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTableBucketExists(ctx, resourceName, &tablebucket), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("maintenance_configuration"), knownvalue.ObjectExact(map[string]knownvalue.Check{ + "iceberg_unreferenced_file_removal": knownvalue.ObjectExact(map[string]knownvalue.Check{ + "settings": knownvalue.ObjectExact(map[string]knownvalue.Check{ + "non_current_days": knownvalue.Int32Exact(20), + 
"unreferenced_days": knownvalue.Int32Exact(6), + }), + names.AttrStatus: tfknownvalue.StringExact(awstypes.MaintenanceStatusEnabled), + }), + })), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + Config: testAccTableBucketConfig_maintenanceConfiguration(rName, awstypes.MaintenanceStatusEnabled, 15, 4), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTableBucketExists(ctx, resourceName, &tablebucket), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("maintenance_configuration"), knownvalue.ObjectExact(map[string]knownvalue.Check{ + "iceberg_unreferenced_file_removal": knownvalue.ObjectExact(map[string]knownvalue.Check{ + "settings": knownvalue.ObjectExact(map[string]knownvalue.Check{ + "non_current_days": knownvalue.Int32Exact(15), + "unreferenced_days": knownvalue.Int32Exact(4), + }), + names.AttrStatus: tfknownvalue.StringExact(awstypes.MaintenanceStatusEnabled), + }), + })), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + Config: testAccTableBucketConfig_maintenanceConfiguration(rName, awstypes.MaintenanceStatusDisabled, 15, 4), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTableBucketExists(ctx, resourceName, &tablebucket), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("maintenance_configuration"), knownvalue.ObjectExact(map[string]knownvalue.Check{ + "iceberg_unreferenced_file_removal": knownvalue.ObjectExact(map[string]knownvalue.Check{ + "settings": knownvalue.ObjectExact(map[string]knownvalue.Check{ + "non_current_days": knownvalue.Int32Exact(15), + "unreferenced_days": knownvalue.Int32Exact(4), + }), + names.AttrStatus: tfknownvalue.StringExact(awstypes.MaintenanceStatusDisabled), + }), + })), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + func testAccCheckTableBucketDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).S3TablesClient(ctx) @@ -141,3 +249,21 @@ resource "aws_s3tables_table_bucket" "test" { } `, rName) } + +func testAccTableBucketConfig_maintenanceConfiguration(rName string, status awstypes.MaintenanceStatus, nonCurrentDays, unreferencedDays int32) string { + return fmt.Sprintf(` +resource "aws_s3tables_table_bucket" "test" { + name = %[1]q + + maintenance_configuration = { + iceberg_unreferenced_file_removal = { + settings = { + non_current_days = %[3]d + unreferenced_days = %[4]d + } + status = %[2]q + } + } +} +`, rName, status, nonCurrentDays, unreferencedDays) +} diff --git a/website/docs/r/s3tables_table_bucket.html.markdown b/website/docs/r/s3tables_table_bucket.html.markdown index 8e4a6cd2050f..7084d6b20770 100644 --- a/website/docs/r/s3tables_table_bucket.html.markdown +++ b/website/docs/r/s3tables_table_bucket.html.markdown @@ -29,6 +29,36 @@ The following argument is required: Can consist of lowercase letters, numbers, and hyphens, and must 
begin and end with a lowercase letter or number. A full list of bucket naming rules may be found in [S3 Tables documentation](???). +The following argument is optional: + +* `maintenance_configuration` - (Optional) A single table bucket maintenance configuration block. + [See `maintenance_configuration` below](#maintenance_configuration) + +### maintenance_configuration + +The `maintenance_configuration` configuration block supports the following argument: + +* `iceberg_unreferenced_file_removal` - (Required) A single Iceberg unreferenced file removal settings block. + [See `iceberg_unreferenced_file_removal` below](#iceberg_unreferenced_file_removal) + +### `iceberg_unreferenced_file_removal` + +The `iceberg_unreferenced_file_removal` configuration block supports the following arguments: + +* `settings` - (Required) Settings for unreferenced file removal. + [See `iceberg_unreferenced_file_removal.settings` below](#iceberg_unreferenced_file_removalsettings) +* `status` - (Required) Whether the configuration is enabled. + Valid values are `enabled` and `disabled`. + +### `iceberg_unreferenced_file_removal.settings` + +The `iceberg_unreferenced_file_removal.settings` configuration block supports the following arguments: + +* `non_current_days` - (Required) Data objects marked for deletion are deleted after this many days. + Must be at least `1`. +* `unreferenced_days` - (Required) Unreferenced data objects are marked for deletion after this many days. + Must be at least `1`. + ## Attribute Reference This resource exports the following attributes in addition to the argument above: From 059bb19ca460dfdcc66533b4370105411ce45958 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Fri, 22 Nov 2024 16:35:57 -0800 Subject: [PATCH 17/35] Adds `maintenance_configuration` to `aws_s3tables_table` --- internal/service/s3tables/table.go | 442 ++++++++++++++++++-- internal/service/s3tables/table_bucket.go | 6 +- internal/service/s3tables/table_test.go | 148 ++++++- website/docs/r/s3tables_table.html.markdown | 48 +++ 4 files changed, 602 insertions(+), 42 deletions(-) diff --git a/internal/service/s3tables/table.go b/internal/service/s3tables/table.go index 2a97a691bd00..0678f57c8a21 100644 --- a/internal/service/s3tables/table.go +++ b/internal/service/s3tables/table.go @@ -15,12 +15,17 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/s3tables/types" "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/objectplanmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/errs" @@ -70,6 +75,19 @@ func (r *resourceTable) Schema(ctx context.Context, req resource.SchemaRequest, Required: true, 
// TODO: Only one format is currently supported. When a new value is added, we can determine if `format` can be changed in-place or must recreate the resource }, + // TODO: Once Protocol v6 is supported, convert this to a `schema.SingleNestedAttribute` with full schema information + // Validations needed: + // * iceberg_compaction.settings.target_file_size_mb: int32validator.Between(64, 512) + // * iceberg_snapshot_management.settings.max_snapshot_age_hours: int32validator.AtLeast(1) + // * iceberg_snapshot_management.settings.min_snapshots_to_keep: int32validator.AtLeast(1) + "maintenance_configuration": schema.ObjectAttribute{ + CustomType: fwtypes.NewObjectTypeOf[tableMaintenanceConfigurationModel](ctx), + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Object{ + objectplanmodifier.UseStateForUnknown(), + }, + }, "metadata_location": schema.StringAttribute{ Computed: true, PlanModifiers: []planmodifier.String{ @@ -152,6 +170,64 @@ func (r *resourceTable) Create(ctx context.Context, req resource.CreateRequest, return } + if !plan.MaintenanceConfiguration.IsUnknown() && !plan.MaintenanceConfiguration.IsNull() { + mc, d := plan.MaintenanceConfiguration.ToPtr(ctx) + resp.Diagnostics.Append(d...) + if resp.Diagnostics.HasError() { + return + } + + if !mc.IcebergCompaction.IsNull() { + input := s3tables.PutTableMaintenanceConfigurationInput{ + Name: plan.Name.ValueStringPointer(), + Namespace: plan.Namespace.ValueStringPointer(), + TableBucketARN: plan.TableBucketARN.ValueStringPointer(), + Type: awstypes.TableMaintenanceTypeIcebergCompaction, + } + + value, d := expandTableMaintenanceIcebergCompaction(ctx, mc.IcebergCompaction) + resp.Diagnostics.Append(d...) + if resp.Diagnostics.HasError() { + return + } + input.Value = &value + + _, err := conn.PutTableMaintenanceConfiguration(ctx, &input) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, resNameTableBucket, plan.Name.String(), err), + err.Error(), + ) + return + } + } + + if !mc.IcebergSnapshotManagement.IsNull() { + input := s3tables.PutTableMaintenanceConfigurationInput{ + Name: plan.Name.ValueStringPointer(), + Namespace: plan.Namespace.ValueStringPointer(), + TableBucketARN: plan.TableBucketARN.ValueStringPointer(), + Type: awstypes.TableMaintenanceTypeIcebergSnapshotManagement, + } + + value, d := expandTableMaintenanceIcebergSnapshotManagement(ctx, mc.IcebergSnapshotManagement) + resp.Diagnostics.Append(d...) 
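+			// As with iceberg_compaction above, snapshot management is applied through its own
+			// PutTableMaintenanceConfiguration call, since each request carries a single maintenance type.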
+ if resp.Diagnostics.HasError() { + return + } + input.Value = &value + + _, err := conn.PutTableMaintenanceConfiguration(ctx, &input) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, resNameTableBucket, plan.Name.String(), err), + err.Error(), + ) + return + } + } + } + table, err := findTable(ctx, conn, plan.TableBucketARN.ValueString(), plan.Namespace.ValueString(), plan.Name.ValueString()) if err != nil { resp.Diagnostics.AddError( @@ -167,6 +243,24 @@ func (r *resourceTable) Create(ctx context.Context, req resource.CreateRequest, } plan.Namespace = types.StringValue(table.Namespace[0]) + awsMaintenanceConfig, err := conn.GetTableMaintenanceConfiguration(ctx, &s3tables.GetTableMaintenanceConfigurationInput{ + Name: plan.Name.ValueStringPointer(), + Namespace: plan.Namespace.ValueStringPointer(), + TableBucketARN: plan.TableBucketARN.ValueStringPointer(), + }) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, resNameTableBucket, plan.Name.String(), err), + err.Error(), + ) + } + maintenanceConfiguration, d := flattenTableMaintenanceConfiguration(ctx, awsMaintenanceConfig) + resp.Diagnostics.Append(d...) + if resp.Diagnostics.HasError() { + return + } + plan.MaintenanceConfiguration = maintenanceConfiguration + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) } @@ -186,7 +280,7 @@ func (r *resourceTable) Read(ctx context.Context, req resource.ReadRequest, resp } if err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionSetting, ResNameTable, state.Name.String(), err), + create.ProblemStandardMessage(names.S3Tables, create.ErrActionReading, ResNameTable, state.Name.String(), err), err.Error(), ) return @@ -198,6 +292,24 @@ func (r *resourceTable) Read(ctx context.Context, req resource.ReadRequest, resp } state.Namespace = types.StringValue(out.Namespace[0]) + awsMaintenanceConfig, err := conn.GetTableMaintenanceConfiguration(ctx, &s3tables.GetTableMaintenanceConfigurationInput{ + Name: state.Name.ValueStringPointer(), + Namespace: state.Namespace.ValueStringPointer(), + TableBucketARN: state.TableBucketARN.ValueStringPointer(), + }) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionReading, resNameTableBucket, state.Name.String(), err), + err.Error(), + ) + } + maintenanceConfiguration, d := flattenTableMaintenanceConfiguration(ctx, awsMaintenanceConfig) + resp.Diagnostics.Append(d...) + if resp.Diagnostics.HasError() { + return + } + state.MaintenanceConfiguration = maintenanceConfiguration + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) } @@ -233,23 +345,105 @@ func (r *resourceTable) Update(ctx context.Context, req resource.UpdateRequest, err.Error(), ) } + } - table, err := findTable(ctx, conn, plan.TableBucketARN.ValueString(), plan.Namespace.ValueString(), plan.Name.ValueString()) - if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, ResNameTable, plan.Name.String(), err), - err.Error(), - ) + if !plan.MaintenanceConfiguration.Equal(state.MaintenanceConfiguration) { + planMC, d := plan.MaintenanceConfiguration.ToPtr(ctx) + resp.Diagnostics.Append(d...) + if resp.Diagnostics.HasError() { return } - resp.Diagnostics.Append(flex.Flatten(ctx, table, &plan, flex.WithFieldNamePrefix("Table"))...) 
+ stateMC, d := state.MaintenanceConfiguration.ToPtr(ctx) + resp.Diagnostics.Append(d...) if resp.Diagnostics.HasError() { return } - plan.Namespace = types.StringValue(table.Namespace[0]) + + if !planMC.IcebergCompaction.Equal(stateMC.IcebergCompaction) { + input := s3tables.PutTableMaintenanceConfigurationInput{ + Name: plan.Name.ValueStringPointer(), + Namespace: plan.Namespace.ValueStringPointer(), + TableBucketARN: plan.TableBucketARN.ValueStringPointer(), + Type: awstypes.TableMaintenanceTypeIcebergCompaction, + } + + value, d := expandTableMaintenanceIcebergCompaction(ctx, planMC.IcebergCompaction) + resp.Diagnostics.Append(d...) + if resp.Diagnostics.HasError() { + return + } + input.Value = &value + + _, err := conn.PutTableMaintenanceConfiguration(ctx, &input) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionUpdating, resNameTableBucket, plan.Name.String(), err), + err.Error(), + ) + return + } + } + + if !planMC.IcebergSnapshotManagement.Equal(stateMC.IcebergSnapshotManagement) { + input := s3tables.PutTableMaintenanceConfigurationInput{ + Name: plan.Name.ValueStringPointer(), + Namespace: plan.Namespace.ValueStringPointer(), + TableBucketARN: plan.TableBucketARN.ValueStringPointer(), + Type: awstypes.TableMaintenanceTypeIcebergSnapshotManagement, + } + + value, d := expandTableMaintenanceIcebergSnapshotManagement(ctx, planMC.IcebergSnapshotManagement) + resp.Diagnostics.Append(d...) + if resp.Diagnostics.HasError() { + return + } + input.Value = &value + + _, err := conn.PutTableMaintenanceConfiguration(ctx, &input) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionUpdating, resNameTableBucket, plan.Name.String(), err), + err.Error(), + ) + return + } + } + } + + table, err := findTable(ctx, conn, plan.TableBucketARN.ValueString(), plan.Namespace.ValueString(), plan.Name.ValueString()) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionUpdating, ResNameTable, plan.Name.String(), err), + err.Error(), + ) + return } + resp.Diagnostics.Append(flex.Flatten(ctx, table, &plan, flex.WithFieldNamePrefix("Table"))...) + if resp.Diagnostics.HasError() { + return + } + plan.Namespace = types.StringValue(table.Namespace[0]) + + awsMaintenanceConfig, err := conn.GetTableMaintenanceConfiguration(ctx, &s3tables.GetTableMaintenanceConfigurationInput{ + Name: plan.Name.ValueStringPointer(), + Namespace: plan.Namespace.ValueStringPointer(), + TableBucketARN: plan.TableBucketARN.ValueStringPointer(), + }) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionUpdating, resNameTableBucket, plan.Name.String(), err), + err.Error(), + ) + } + maintenanceConfiguration, d := flattenTableMaintenanceConfiguration(ctx, awsMaintenanceConfig) + resp.Diagnostics.Append(d...) + if resp.Diagnostics.HasError() { + return + } + plan.MaintenanceConfiguration = maintenanceConfiguration + resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) } @@ -293,14 +487,7 @@ func (r *resourceTable) ImportState(ctx context.Context, req resource.ImportStat return } - var state resourceTableModel - identifier.Populate(&state) - - diags := resp.State.Set(ctx, &state) - resp.Diagnostics.Append(diags...) 
- if resp.Diagnostics.HasError() { - return - } + identifier.PopulateState(ctx, &resp.State, &resp.Diagnostics) } func findTable(ctx context.Context, conn *s3tables.Client, bucketARN, namespace, name string) (*s3tables.GetTableOutput, error) { @@ -330,20 +517,205 @@ func findTable(ctx context.Context, conn *s3tables.Client, bucketARN, namespace, } type resourceTableModel struct { - ARN types.String `tfsdk:"arn"` - CreatedAt timetypes.RFC3339 `tfsdk:"created_at"` - CreatedBy types.String `tfsdk:"created_by"` - Format fwtypes.StringEnum[awstypes.OpenTableFormat] `tfsdk:"format"` - MetadataLocation types.String `tfsdk:"metadata_location"` - ModifiedAt timetypes.RFC3339 `tfsdk:"modified_at"` - ModifiedBy types.String `tfsdk:"modified_by"` - Name types.String `tfsdk:"name"` - Namespace types.String `tfsdk:"namespace" autoflex:"-"` - OwnerAccountID types.String `tfsdk:"owner_account_id"` - TableBucketARN fwtypes.ARN `tfsdk:"table_bucket_arn"` - Type fwtypes.StringEnum[awstypes.TableType] `tfsdk:"type"` - VersionToken types.String `tfsdk:"version_token"` - WarehouseLocation types.String `tfsdk:"warehouse_location"` + ARN types.String `tfsdk:"arn"` + CreatedAt timetypes.RFC3339 `tfsdk:"created_at"` + CreatedBy types.String `tfsdk:"created_by"` + Format fwtypes.StringEnum[awstypes.OpenTableFormat] `tfsdk:"format"` + MaintenanceConfiguration fwtypes.ObjectValueOf[tableMaintenanceConfigurationModel] `tfsdk:"maintenance_configuration" autoflex:"-"` + MetadataLocation types.String `tfsdk:"metadata_location"` + ModifiedAt timetypes.RFC3339 `tfsdk:"modified_at"` + ModifiedBy types.String `tfsdk:"modified_by"` + Name types.String `tfsdk:"name"` + Namespace types.String `tfsdk:"namespace" autoflex:"-"` + OwnerAccountID types.String `tfsdk:"owner_account_id"` + TableBucketARN fwtypes.ARN `tfsdk:"table_bucket_arn"` + Type fwtypes.StringEnum[awstypes.TableType] `tfsdk:"type"` + VersionToken types.String `tfsdk:"version_token"` + WarehouseLocation types.String `tfsdk:"warehouse_location"` +} + +type tableMaintenanceConfigurationModel struct { + IcebergCompaction fwtypes.ObjectValueOf[tableMaintenanceConfigurationValueModel[icebergCompactionSettingsModel]] `tfsdk:"iceberg_compaction"` + IcebergSnapshotManagement fwtypes.ObjectValueOf[tableMaintenanceConfigurationValueModel[icebergSnapshotManagementSettingsModel]] `tfsdk:"iceberg_snapshot_management"` +} + +type tableMaintenanceConfigurationValueModel[T any] struct { + Settings fwtypes.ObjectValueOf[T] `tfsdk:"settings"` + Status fwtypes.StringEnum[awstypes.MaintenanceStatus] `tfsdk:"status"` +} + +type icebergCompactionSettingsModel struct { + TargetFileSizeMB types.Int32 `tfsdk:"target_file_size_mb"` +} + +type icebergSnapshotManagementSettingsModel struct { + MaxSnapshotAgeHours types.Int32 `tfsdk:"max_snapshot_age_hours"` + MinSnapshotsToKeep types.Int32 `tfsdk:"min_snapshots_to_keep"` +} + +func flattenTableMaintenanceConfiguration(ctx context.Context, in *s3tables.GetTableMaintenanceConfigurationOutput) (result fwtypes.ObjectValueOf[tableMaintenanceConfigurationModel], diags diag.Diagnostics) { + compactionConfig := in.Configuration[string(awstypes.TableMaintenanceTypeIcebergCompaction)] + compactionConfigModel, d := flattenTableMaintenanceIcebergCompaction(ctx, &compactionConfig) + diags.Append(d...) 
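+	// GetTableMaintenanceConfiguration returns a map keyed by maintenance type; the compaction
+	// entry was flattened above and the snapshot management entry is handled next.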
+ if diags.HasError() { + return result, diags + } + + snapshotManagementConfig := in.Configuration[string(awstypes.TableMaintenanceTypeIcebergSnapshotManagement)] + snapshotManagementConfigModel, d := flattenTableMaintenanceIcebergSnapshotManagement(ctx, &snapshotManagementConfig) + diags.Append(d...) + if diags.HasError() { + return result, diags + } + + model := tableMaintenanceConfigurationModel{ + IcebergCompaction: compactionConfigModel, + IcebergSnapshotManagement: snapshotManagementConfigModel, + } + + result, d = fwtypes.NewObjectValueOf(ctx, &model) + diags.Append(d...) + return result, diags +} + +func expandTableMaintenanceIcebergCompaction(ctx context.Context, in fwtypes.ObjectValueOf[tableMaintenanceConfigurationValueModel[icebergCompactionSettingsModel]]) (result awstypes.TableMaintenanceConfigurationValue, diags diag.Diagnostics) { + model, d := in.ToPtr(ctx) + diags.Append(d...) + if diags.HasError() { + return result, diags + } + + settings, d := expandIcebergCompactionSettings(ctx, model.Settings) + diags.Append(d...) + if diags.HasError() { + return result, diags + } + + result.Settings = settings + result.Status = model.Status.ValueEnum() + + return result, diags +} + +func flattenTableMaintenanceIcebergCompaction(ctx context.Context, in *awstypes.TableMaintenanceConfigurationValue) (result fwtypes.ObjectValueOf[tableMaintenanceConfigurationValueModel[icebergCompactionSettingsModel]], diags diag.Diagnostics) { + iceberg, d := flattenIcebergCompactionSettings(ctx, in.Settings) + diags.Append(d...) + if diags.HasError() { + return result, diags + } + + model := tableMaintenanceConfigurationValueModel[icebergCompactionSettingsModel]{ + Settings: iceberg, + Status: fwtypes.StringEnumValue(in.Status), + } + + result, d = fwtypes.NewObjectValueOf(ctx, &model) + diags.Append(d...) + return result, diags +} + +func expandIcebergCompactionSettings(ctx context.Context, in fwtypes.ObjectValueOf[icebergCompactionSettingsModel]) (result *awstypes.TableMaintenanceSettingsMemberIcebergCompaction, diags diag.Diagnostics) { + model, d := in.ToPtr(ctx) + diags.Append(d...) + if diags.HasError() { + return result, diags + } + + var value awstypes.IcebergCompactionSettings + + diags.Append(flex.Expand(ctx, model, &value)...) + + return &awstypes.TableMaintenanceSettingsMemberIcebergCompaction{ + Value: value, + }, diags +} + +func flattenIcebergCompactionSettings(ctx context.Context, in awstypes.TableMaintenanceSettings) (result fwtypes.ObjectValueOf[icebergCompactionSettingsModel], diags diag.Diagnostics) { + switch t := in.(type) { + case *awstypes.TableMaintenanceSettingsMemberIcebergCompaction: + var model icebergCompactionSettingsModel + diags.Append(flex.Flatten(ctx, t.Value, &model)...) + result = fwtypes.NewObjectValueOfMust(ctx, &model) + + case *awstypes.UnknownUnionMember: + tflog.Warn(ctx, "Unexpected tagged union member", map[string]any{ + "tag": t.Tag, + }) + + default: + tflog.Warn(ctx, "Unexpected nil tagged union value") + } + return result, diags +} + +func expandTableMaintenanceIcebergSnapshotManagement(ctx context.Context, in fwtypes.ObjectValueOf[tableMaintenanceConfigurationValueModel[icebergSnapshotManagementSettingsModel]]) (result awstypes.TableMaintenanceConfigurationValue, diags diag.Diagnostics) { + model, d := in.ToPtr(ctx) + diags.Append(d...) + if diags.HasError() { + return result, diags + } + + settings, d := expandIcebergSnapshotManagementSettings(ctx, model.Settings) + diags.Append(d...) 
+ if diags.HasError() { + return result, diags + } + + result.Settings = settings + result.Status = model.Status.ValueEnum() + + return result, diags +} + +func flattenTableMaintenanceIcebergSnapshotManagement(ctx context.Context, in *awstypes.TableMaintenanceConfigurationValue) (result fwtypes.ObjectValueOf[tableMaintenanceConfigurationValueModel[icebergSnapshotManagementSettingsModel]], diags diag.Diagnostics) { + iceberg, d := flattenIcebergSnapshotManagementSettings(ctx, in.Settings) + diags.Append(d...) + if diags.HasError() { + return result, diags + } + + model := tableMaintenanceConfigurationValueModel[icebergSnapshotManagementSettingsModel]{ + Settings: iceberg, + Status: fwtypes.StringEnumValue(in.Status), + } + + result, d = fwtypes.NewObjectValueOf(ctx, &model) + diags.Append(d...) + return result, diags +} + +func expandIcebergSnapshotManagementSettings(ctx context.Context, in fwtypes.ObjectValueOf[icebergSnapshotManagementSettingsModel]) (result *awstypes.TableMaintenanceSettingsMemberIcebergSnapshotManagement, diags diag.Diagnostics) { + model, d := in.ToPtr(ctx) + diags.Append(d...) + if diags.HasError() { + return result, diags + } + + var value awstypes.IcebergSnapshotManagementSettings + + diags.Append(flex.Expand(ctx, model, &value)...) + + return &awstypes.TableMaintenanceSettingsMemberIcebergSnapshotManagement{ + Value: value, + }, diags +} + +func flattenIcebergSnapshotManagementSettings(ctx context.Context, in awstypes.TableMaintenanceSettings) (result fwtypes.ObjectValueOf[icebergSnapshotManagementSettingsModel], diags diag.Diagnostics) { + switch t := in.(type) { + case *awstypes.TableMaintenanceSettingsMemberIcebergSnapshotManagement: + var model icebergSnapshotManagementSettingsModel + diags.Append(flex.Flatten(ctx, t.Value, &model)...) + result = fwtypes.NewObjectValueOfMust(ctx, &model) + + case *awstypes.UnknownUnionMember: + tflog.Warn(ctx, "Unexpected tagged union member", map[string]any{ + "tag": t.Tag, + }) + + default: + tflog.Warn(ctx, "Unexpected nil tagged union value") + } + return result, diags } func tableIDFromTableARN(s string) (string, error) { @@ -395,10 +767,10 @@ func (id tableIdentifier) String() string { id.Name } -func (id tableIdentifier) Populate(m *resourceTableModel) { - m.TableBucketARN = fwtypes.ARNValue(id.TableBucketARN) - m.Namespace = types.StringValue(id.Namespace) - m.Name = types.StringValue(id.Name) +func (id tableIdentifier) PopulateState(ctx context.Context, s *tfsdk.State, diags *diag.Diagnostics) { + diags.Append(s.SetAttribute(ctx, path.Root("table_bucket_arn"), id.TableBucketARN)...) + diags.Append(s.SetAttribute(ctx, path.Root(names.AttrNamespace), id.Namespace)...) + diags.Append(s.SetAttribute(ctx, path.Root(names.AttrName), id.Name)...) 
} var tableNameValidator = []validator.String{ diff --git a/internal/service/s3tables/table_bucket.go b/internal/service/s3tables/table_bucket.go index 0d167b14b07c..59b64e0b77f4 100644 --- a/internal/service/s3tables/table_bucket.go +++ b/internal/service/s3tables/table_bucket.go @@ -226,7 +226,7 @@ func (r *resourceTableBucket) Read(ctx context.Context, req resource.ReadRequest } awsMaintenanceConfig, err := conn.GetTableBucketMaintenanceConfiguration(ctx, &s3tables.GetTableBucketMaintenanceConfigurationInput{ - TableBucketARN: out.Arn, + TableBucketARN: state.ARN.ValueStringPointer(), }) if err != nil { resp.Diagnostics.AddError( @@ -385,9 +385,9 @@ type icebergUnreferencedFileRemovalSettingsModel struct { } func flattenTableBucketMaintenanceConfiguration(ctx context.Context, in *s3tables.GetTableBucketMaintenanceConfigurationOutput) (result fwtypes.ObjectValueOf[tableBucketMaintenanceConfigurationModel], diags diag.Diagnostics) { - icebergConfig := in.Configuration[string(awstypes.TableBucketMaintenanceTypeIcebergUnreferencedFileRemoval)] + unreferencedFileRemovalConfig := in.Configuration[string(awstypes.TableBucketMaintenanceTypeIcebergUnreferencedFileRemoval)] - valueModel, d := flattenTableBucketMaintenanceConfigurationValue(ctx, &icebergConfig) + valueModel, d := flattenTableBucketMaintenanceConfigurationValue(ctx, &unreferencedFileRemovalConfig) diags.Append(d...) if diags.HasError() { return result, diags diff --git a/internal/service/s3tables/table_test.go b/internal/service/s3tables/table_test.go index 8f72ef89b68f..7469c4fbb69b 100644 --- a/internal/service/s3tables/table_test.go +++ b/internal/service/s3tables/table_test.go @@ -23,6 +23,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" tfs3tables "github.com/hashicorp/terraform-provider-aws/internal/service/s3tables" @@ -74,13 +75,30 @@ func TestAccS3TablesTable_basic(t *testing.T) { return resource.TestMatchResourceAttr(resourceName, "warehouse_location", regexache.MustCompile("^s3://"+tableID[:19]+".+--table-s3$"))(s) }, ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("maintenance_configuration"), knownvalue.ObjectExact(map[string]knownvalue.Check{ + "iceberg_compaction": knownvalue.ObjectExact(map[string]knownvalue.Check{ + "settings": knownvalue.ObjectExact(map[string]knownvalue.Check{ + "target_file_size_mb": knownvalue.Int32Exact(512), + }), + names.AttrStatus: tfknownvalue.StringExact(awstypes.MaintenanceStatusEnabled), + }), + "iceberg_snapshot_management": knownvalue.ObjectExact(map[string]knownvalue.Check{ + "settings": knownvalue.ObjectExact(map[string]knownvalue.Check{ + "max_snapshot_age_hours": knownvalue.Int32Exact(120), + "min_snapshots_to_keep": knownvalue.Int32Exact(1), + }), + names.AttrStatus: tfknownvalue.StringExact(awstypes.MaintenanceStatusEnabled), + }), + })), + }, }, { ResourceName: resourceName, ImportState: true, ImportStateIdFunc: testAccTableImportStateIdFunc(resourceName), ImportStateVerify: true, - ImportStateVerifyIdentifierAttribute: names.AttrName, + ImportStateVerifyIdentifierAttribute: names.AttrARN, }, }, }) @@ -191,7 +209,7 @@ func TestAccS3TablesTable_rename(t *testing.T) { 
ImportState: true, ImportStateIdFunc: testAccTableImportStateIdFunc(resourceName), ImportStateVerify: true, - ImportStateVerifyIdentifierAttribute: names.AttrName, + ImportStateVerifyIdentifierAttribute: names.AttrARN, }, }, }) @@ -272,7 +290,7 @@ func TestAccS3TablesTable_updateNamespace(t *testing.T) { ImportState: true, ImportStateIdFunc: testAccTableImportStateIdFunc(resourceName), ImportStateVerify: true, - ImportStateVerifyIdentifierAttribute: names.AttrName, + ImportStateVerifyIdentifierAttribute: names.AttrARN, }, }, }) @@ -357,7 +375,89 @@ func TestAccS3TablesTable_updateNameAndNamespace(t *testing.T) { ImportState: true, ImportStateIdFunc: testAccTableImportStateIdFunc(resourceName), ImportStateVerify: true, - ImportStateVerifyIdentifierAttribute: names.AttrName, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccS3TablesTable_maintenanceConfiguration(t *testing.T) { + ctx := acctest.Context(t) + + var table s3tables.GetTableOutput + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + namespace := strings.ReplaceAll(sdkacctest.RandomWithPrefix(acctest.ResourcePrefix), "-", "_") + rName := strings.ReplaceAll(sdkacctest.RandomWithPrefix(acctest.ResourcePrefix), "-", "_") + resourceName := "aws_s3tables_table.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.S3TablesServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTableDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTableConfig_maintenanceConfiguration(rName, namespace, bucketName, 64, 24, 2), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTableExists(ctx, resourceName, &table), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("maintenance_configuration"), knownvalue.ObjectExact(map[string]knownvalue.Check{ + "iceberg_compaction": knownvalue.ObjectExact(map[string]knownvalue.Check{ + "settings": knownvalue.ObjectExact(map[string]knownvalue.Check{ + "target_file_size_mb": knownvalue.Int32Exact(64), + }), + names.AttrStatus: tfknownvalue.StringExact(awstypes.MaintenanceStatusEnabled), + }), + "iceberg_snapshot_management": knownvalue.ObjectExact(map[string]knownvalue.Check{ + "settings": knownvalue.ObjectExact(map[string]knownvalue.Check{ + "max_snapshot_age_hours": knownvalue.Int32Exact(24), + "min_snapshots_to_keep": knownvalue.Int32Exact(2), + }), + names.AttrStatus: tfknownvalue.StringExact(awstypes.MaintenanceStatusEnabled), + }), + })), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccTableImportStateIdFunc(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + Config: testAccTableConfig_maintenanceConfiguration(rName, namespace, bucketName, 128, 48, 1), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTableExists(ctx, resourceName, &table), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("maintenance_configuration"), knownvalue.ObjectExact(map[string]knownvalue.Check{ + "iceberg_compaction": knownvalue.ObjectExact(map[string]knownvalue.Check{ + "settings": knownvalue.ObjectExact(map[string]knownvalue.Check{ + "target_file_size_mb": knownvalue.Int32Exact(128), + }), + names.AttrStatus: 
tfknownvalue.StringExact(awstypes.MaintenanceStatusEnabled),
+					}),
+					"iceberg_snapshot_management": knownvalue.ObjectExact(map[string]knownvalue.Check{
+						"settings": knownvalue.ObjectExact(map[string]knownvalue.Check{
+							"max_snapshot_age_hours": knownvalue.Int32Exact(48),
+							"min_snapshots_to_keep":  knownvalue.Int32Exact(1),
+						}),
+						names.AttrStatus: tfknownvalue.StringExact(awstypes.MaintenanceStatusEnabled),
+					}),
+				})),
+			},
+			{
+				ResourceName:                         resourceName,
+				ImportState:                          true,
+				ImportStateIdFunc:                    testAccTableImportStateIdFunc(resourceName),
+				ImportStateVerify:                    true,
+				ImportStateVerifyIdentifierAttribute: names.AttrARN,
 			},
 		},
 	})
 }
@@ -459,3 +559,43 @@ resource "aws_s3tables_table_bucket" "test" {
 }
 `, rName, namespace, bucketName)
 }
+
+func testAccTableConfig_maintenanceConfiguration(rName, namespace, bucketName string, targetSize, maxSnapshotAge, minSnapshots int32) string {
+	return fmt.Sprintf(`
+resource "aws_s3tables_table" "test" {
+  name             = %[1]q
+  namespace        = aws_s3tables_namespace.test.namespace[0]
+  table_bucket_arn = aws_s3tables_namespace.test.table_bucket_arn
+  format           = "ICEBERG"
+
+  maintenance_configuration = {
+    iceberg_compaction = {
+      settings = {
+        target_file_size_mb = %[4]d
+      }
+      status = "enabled"
+    }
+    iceberg_snapshot_management = {
+      settings = {
+        max_snapshot_age_hours = %[5]d
+        min_snapshots_to_keep  = %[6]d
+      }
+      status = "enabled"
+    }
+  }
+}
+
+resource "aws_s3tables_namespace" "test" {
+  namespace        = [%[2]q]
+  table_bucket_arn = aws_s3tables_table_bucket.test.arn
+
+  lifecycle {
+    create_before_destroy = true
+  }
+}
+
+resource "aws_s3tables_table_bucket" "test" {
+  name = %[3]q
+}
+`, rName, namespace, bucketName, targetSize, maxSnapshotAge, minSnapshots)
+}
diff --git a/website/docs/r/s3tables_table.html.markdown b/website/docs/r/s3tables_table.html.markdown
index a3db7ad3daa4..d6af711a8e5f 100644
--- a/website/docs/r/s3tables_table.html.markdown
+++ b/website/docs/r/s3tables_table.html.markdown
@@ -46,6 +46,54 @@ The following arguments are required:
   Can consist of lowercase letters, numbers, and underscores, and must
   begin and end with a lowercase letter or number.
 * `table_bucket_arn` - (Required, Forces new resource) ARN referencing the Table Bucket that contains this Namespace.
+The following argument is optional:
+
+* `maintenance_configuration` - (Optional) A single table maintenance configuration block.
+  [See `maintenance_configuration` below](#maintenance_configuration)
+
+### maintenance_configuration
+
+The `maintenance_configuration` configuration block supports the following arguments:
+
+* `iceberg_compaction` - (Required) A single Iceberg compaction settings block.
+  [See `iceberg_compaction` below](#iceberg_compaction)
+* `iceberg_snapshot_management` - (Required) A single Iceberg snapshot management settings block.
+  [See `iceberg_snapshot_management` below](#iceberg_snapshot_management)
+
+### `iceberg_compaction`
+
+The `iceberg_compaction` configuration block supports the following arguments:
+
+* `settings` - (Required) Settings for compaction.
+  [See `iceberg_compaction.settings` below](#iceberg_compactionsettings)
+* `status` - (Required) Whether the configuration is enabled.
+  Valid values are `enabled` and `disabled`.
+
+### `iceberg_compaction.settings`
+
+The `iceberg_compaction.settings` configuration block supports the following argument:
+
+* `target_file_size_mb` - (Required) Data objects smaller than this size may be combined with others to improve query performance.
+  Must be between `64` and `512`.
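+
+An illustrative example combining this block with the `iceberg_snapshot_management` block described below; the resource names, the referenced `aws_s3tables_namespace.example` resource, and all values shown are placeholders:
+
+```terraform
+resource "aws_s3tables_table" "example" {
+  name             = "example_table"
+  namespace        = aws_s3tables_namespace.example.namespace[0]
+  table_bucket_arn = aws_s3tables_namespace.example.table_bucket_arn
+  format           = "ICEBERG"
+
+  maintenance_configuration = {
+    iceberg_compaction = {
+      settings = {
+        target_file_size_mb = 128
+      }
+      status = "enabled"
+    }
+    iceberg_snapshot_management = {
+      settings = {
+        max_snapshot_age_hours = 48
+        min_snapshots_to_keep  = 1
+      }
+      status = "enabled"
+    }
+  }
+}
+```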
+
+### `iceberg_snapshot_management`
+
+The `iceberg_snapshot_management` configuration block supports the following arguments:
+
+* `settings` - (Required) Settings for snapshot management.
+  [See `iceberg_snapshot_management.settings` below](#iceberg_snapshot_managementsettings)
+* `status` - (Required) Whether the configuration is enabled.
+  Valid values are `enabled` and `disabled`.
+
+### `iceberg_snapshot_management.settings`
+
+The `iceberg_snapshot_management.settings` configuration block supports the following arguments:
+
+* `max_snapshot_age_hours` - (Required) Snapshots older than this will be marked for deletion.
+  Must be at least `1`.
+* `min_snapshots_to_keep` - (Required) Minimum number of snapshots to keep.
+  Must be at least `1`.
+
 ## Attribute Reference
 
 This resource exports the following attributes in addition to the arguments above:

From b2fa7a7fa5ac5395fe63a4c07c23ef541c861a06 Mon Sep 17 00:00:00 2001
From: Graham Davison
Date: Fri, 22 Nov 2024 16:42:12 -0800
Subject: [PATCH 18/35] Corrects actions

---
 internal/service/s3tables/namespace.go    | 2 +-
 internal/service/s3tables/table_bucket.go | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/internal/service/s3tables/namespace.go b/internal/service/s3tables/namespace.go
index f07479dc33d4..d387768b6175 100644
--- a/internal/service/s3tables/namespace.go
+++ b/internal/service/s3tables/namespace.go
@@ -167,7 +167,7 @@ func (r *resourceNamespace) Read(ctx context.Context, req resource.ReadRequest,
 	}
 	if err != nil {
 		resp.Diagnostics.AddError(
-			create.ProblemStandardMessage(names.S3Tables, create.ErrActionSetting, resNameNamespace, state.Namespace.String(), err),
+			create.ProblemStandardMessage(names.S3Tables, create.ErrActionReading, resNameNamespace, state.Namespace.String(), err),
 			err.Error(),
 		)
 		return
diff --git a/internal/service/s3tables/table_bucket.go b/internal/service/s3tables/table_bucket.go
index 59b64e0b77f4..de32e100bfef 100644
--- a/internal/service/s3tables/table_bucket.go
+++ b/internal/service/s3tables/table_bucket.go
@@ -230,7 +230,7 @@ func (r *resourceTableBucket) Read(ctx context.Context, req resource.ReadRequest
 	})
 	if err != nil {
 		resp.Diagnostics.AddError(
-			create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, resNameTableBucket, state.Name.String(), err),
+			create.ProblemStandardMessage(names.S3Tables, create.ErrActionReading, resNameTableBucket, state.Name.String(), err),
 			err.Error(),
 		)
 	}
@@ -281,7 +281,7 @@ func (r *resourceTableBucket) Update(ctx context.Context, req resource.UpdateReq
 			_, err := conn.PutTableBucketMaintenanceConfiguration(ctx, &input)
 			if err != nil {
 				resp.Diagnostics.AddError(
-					create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, resNameTableBucket, plan.Name.String(), err),
+					create.ProblemStandardMessage(names.S3Tables, create.ErrActionUpdating, resNameTableBucket, plan.Name.String(), err),
 					err.Error(),
 				)
 				return
@@ -293,7 +293,7 @@ func (r *resourceTableBucket) Update(ctx context.Context, req resource.UpdateReq
 	})
 	if err != nil {
 		resp.Diagnostics.AddError(
-			create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, resNameTableBucket, plan.Name.String(), err),
+			create.ProblemStandardMessage(names.S3Tables, create.ErrActionUpdating, resNameTableBucket, plan.Name.String(), err),
 			err.Error(),
 		)
 	}

From ecef0ea1f6c0881bf280bd02a826356baf3c663d Mon Sep 17 00:00:00 2001
From: Graham Davison
Date: Fri, 22 Nov 2024 16:47:24 -0800
Subject: [PATCH 19/35] Adds identifier struct for namespace

---
internal/service/s3tables/namespace.go | 59 +++++++++++++++++++------- internal/service/s3tables/table.go | 2 +- 2 files changed, 45 insertions(+), 16 deletions(-) diff --git a/internal/service/s3tables/namespace.go b/internal/service/s3tables/namespace.go index d387768b6175..0924d3703fe2 100644 --- a/internal/service/s3tables/namespace.go +++ b/internal/service/s3tables/namespace.go @@ -15,13 +15,15 @@ import ( "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" - "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-provider-aws/internal/create" @@ -213,11 +215,9 @@ func (r *resourceNamespace) Delete(ctx context.Context, req resource.DeleteReque } } -const namespaceIDSeparator = ";" - func (r *resourceNamespace) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { - parts := strings.Split(req.ID, namespaceIDSeparator) - if len(parts) != 2 || parts[0] == "" || parts[1] == "" { + identifier, err := parseNamespaceIdentifier(req.ID) + if err != nil { resp.Diagnostics.AddError( "Invalid Import ID", "Import IDs for S3 Tables Namespaces must use the format
"+namespaceIDSeparator+".\n"+ @@ -226,17 +226,9 @@ func (r *resourceNamespace) ImportState(ctx context.Context, req resource.Import return } - state := resourceNamespaceModel{ - TableBucketARN: fwtypes.ARNValue(parts[0]), - Namespace: fwtypes.NewListValueOfMust[types.String](ctx, []attr.Value{types.StringValue(parts[1])}), - } - - diags := resp.State.Set(ctx, &state) - resp.Diagnostics.Append(diags...) - if resp.Diagnostics.HasError() { - return - } + identifier.PopulateState(ctx, &resp.State, &resp.Diagnostics) } + func findNamespace(ctx context.Context, conn *s3tables.Client, bucketARN, name string) (*s3tables.GetNamespaceOutput, error) { in := &s3tables.GetNamespaceInput{ Namespace: aws.String(name), @@ -276,3 +268,40 @@ var namespaceNameValidator = []validator.String{ stringMustStartWithLetterOrNumber, stringMustEndWithLetterOrNumber, } + +type namespaceIdentifier struct { + TableBucketARN string + Namespace string +} + +const ( + namespaceIDSeparator = ";" + namespaceIDParts = 2 +) + +func parseNamespaceIdentifier(s string) (namespaceIdentifier, error) { + parts := strings.Split(s, namespaceIDSeparator) + if len(parts) != namespaceIDParts { + return namespaceIdentifier{}, errors.New("not enough parts") + } + for i := range namespaceIDParts { + if parts[i] == "" { + return namespaceIdentifier{}, errors.New("empty part") + } + } + + return namespaceIdentifier{ + TableBucketARN: parts[0], + Namespace: parts[1], + }, nil +} + +func (id namespaceIdentifier) String() string { + return id.TableBucketARN + tableIDSeparator + + id.Namespace +} + +func (id namespaceIdentifier) PopulateState(ctx context.Context, s *tfsdk.State, diags *diag.Diagnostics) { + diags.Append(s.SetAttribute(ctx, path.Root("table_bucket_arn"), id.TableBucketARN)...) + diags.Append(s.SetAttribute(ctx, path.Root(names.AttrNamespace), []string{id.Namespace})...) +} diff --git a/internal/service/s3tables/table.go b/internal/service/s3tables/table.go index 0678f57c8a21..8025eadee067 100644 --- a/internal/service/s3tables/table.go +++ b/internal/service/s3tables/table.go @@ -481,7 +481,7 @@ func (r *resourceTable) ImportState(ctx context.Context, req resource.ImportStat if err != nil { resp.Diagnostics.AddError( "Invalid Import ID", - "Import IDs for S3 Tables Tables must use the format
"+namespaceIDSeparator+""+namespaceIDSeparator+"
.\n"+ + "Import IDs for S3 Tables Tables must use the format <table bucket ARN>"+tableIDSeparator+"<namespace>"+tableIDSeparator+"<table name>
.\n"+ fmt.Sprintf("Had %q", req.ID), ) return From 5d119dae0c4085d809d5118e6d9d258ff886d1ae Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Fri, 22 Nov 2024 16:48:07 -0800 Subject: [PATCH 20/35] Adds `UseStateForUnknown` for table bucket `maintenance_configuration` --- internal/service/s3tables/table_bucket.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/internal/service/s3tables/table_bucket.go b/internal/service/s3tables/table_bucket.go index de32e100bfef..21e7a48c6c77 100644 --- a/internal/service/s3tables/table_bucket.go +++ b/internal/service/s3tables/table_bucket.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/objectplanmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" @@ -68,6 +69,9 @@ func (r *resourceTableBucket) Schema(ctx context.Context, req resource.SchemaReq CustomType: fwtypes.NewObjectTypeOf[tableBucketMaintenanceConfigurationModel](ctx), Optional: true, Computed: true, + PlanModifiers: []planmodifier.Object{ + objectplanmodifier.UseStateForUnknown(), + }, }, names.AttrName: schema.StringAttribute{ Required: true, From 6fc0ee4edcf0d5aae8c082ae8b04582fb95af6ec Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Fri, 22 Nov 2024 16:59:13 -0800 Subject: [PATCH 21/35] Updates table bucket `maintenance_configuration` to match table --- internal/service/s3tables/table_bucket.go | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/internal/service/s3tables/table_bucket.go b/internal/service/s3tables/table_bucket.go index 21e7a48c6c77..362ff5b09ac8 100644 --- a/internal/service/s3tables/table_bucket.go +++ b/internal/service/s3tables/table_bucket.go @@ -151,7 +151,7 @@ func (r *resourceTableBucket) Create(ctx context.Context, req resource.CreateReq Type: awstypes.TableBucketMaintenanceTypeIcebergUnreferencedFileRemoval, } - value, d := expandTableBucketMaintenanceConfigurationValue(ctx, mc.IcebergUnreferencedFileRemovalSettings) + value, d := expandTableBucketMaintenanceIcebergUnreferencedFileRemoval(ctx, mc.IcebergUnreferencedFileRemovalSettings) resp.Diagnostics.Append(d...) if resp.Diagnostics.HasError() { return @@ -274,7 +274,7 @@ func (r *resourceTableBucket) Update(ctx context.Context, req resource.UpdateReq Type: awstypes.TableBucketMaintenanceTypeIcebergUnreferencedFileRemoval, } - value, d := expandTableBucketMaintenanceConfigurationValue(ctx, mc.IcebergUnreferencedFileRemovalSettings) + value, d := expandTableBucketMaintenanceIcebergUnreferencedFileRemoval(ctx, mc.IcebergUnreferencedFileRemovalSettings) resp.Diagnostics.Append(d...) 
if resp.Diagnostics.HasError() { return @@ -375,12 +375,12 @@ type resourceTableBucketModel struct { } type tableBucketMaintenanceConfigurationModel struct { - IcebergUnreferencedFileRemovalSettings fwtypes.ObjectValueOf[tableBucketMaintenanceConfigurationValueModel] `tfsdk:"iceberg_unreferenced_file_removal"` + IcebergUnreferencedFileRemovalSettings fwtypes.ObjectValueOf[tableBucketMaintenanceConfigurationValueModel[icebergUnreferencedFileRemovalSettingsModel]] `tfsdk:"iceberg_unreferenced_file_removal"` } -type tableBucketMaintenanceConfigurationValueModel struct { - Settings fwtypes.ObjectValueOf[icebergUnreferencedFileRemovalSettingsModel] `tfsdk:"settings"` - Status fwtypes.StringEnum[awstypes.MaintenanceStatus] `tfsdk:"status"` +type tableBucketMaintenanceConfigurationValueModel[T any] struct { + Settings fwtypes.ObjectValueOf[T] `tfsdk:"settings"` + Status fwtypes.StringEnum[awstypes.MaintenanceStatus] `tfsdk:"status"` } type icebergUnreferencedFileRemovalSettingsModel struct { @@ -390,15 +390,14 @@ type icebergUnreferencedFileRemovalSettingsModel struct { func flattenTableBucketMaintenanceConfiguration(ctx context.Context, in *s3tables.GetTableBucketMaintenanceConfigurationOutput) (result fwtypes.ObjectValueOf[tableBucketMaintenanceConfigurationModel], diags diag.Diagnostics) { unreferencedFileRemovalConfig := in.Configuration[string(awstypes.TableBucketMaintenanceTypeIcebergUnreferencedFileRemoval)] - - valueModel, d := flattenTableBucketMaintenanceConfigurationValue(ctx, &unreferencedFileRemovalConfig) + unreferencedFileRemovalConfigModel, d := flattenTableBucketMaintenanceIcebergUnreferencedFileRemoval(ctx, &unreferencedFileRemovalConfig) diags.Append(d...) if diags.HasError() { return result, diags } model := tableBucketMaintenanceConfigurationModel{ - IcebergUnreferencedFileRemovalSettings: valueModel, + IcebergUnreferencedFileRemovalSettings: unreferencedFileRemovalConfigModel, } result, d = fwtypes.NewObjectValueOf(ctx, &model) @@ -406,7 +405,7 @@ func flattenTableBucketMaintenanceConfiguration(ctx context.Context, in *s3table return result, diags } -func expandTableBucketMaintenanceConfigurationValue(ctx context.Context, in fwtypes.ObjectValueOf[tableBucketMaintenanceConfigurationValueModel]) (result awstypes.TableBucketMaintenanceConfigurationValue, diags diag.Diagnostics) { +func expandTableBucketMaintenanceIcebergUnreferencedFileRemoval(ctx context.Context, in fwtypes.ObjectValueOf[tableBucketMaintenanceConfigurationValueModel[icebergUnreferencedFileRemovalSettingsModel]]) (result awstypes.TableBucketMaintenanceConfigurationValue, diags diag.Diagnostics) { model, d := in.ToPtr(ctx) diags.Append(d...) if diags.HasError() { @@ -425,14 +424,14 @@ func expandTableBucketMaintenanceConfigurationValue(ctx context.Context, in fwty return result, diags } -func flattenTableBucketMaintenanceConfigurationValue(ctx context.Context, in *awstypes.TableBucketMaintenanceConfigurationValue) (result fwtypes.ObjectValueOf[tableBucketMaintenanceConfigurationValueModel], diags diag.Diagnostics) { +func flattenTableBucketMaintenanceIcebergUnreferencedFileRemoval(ctx context.Context, in *awstypes.TableBucketMaintenanceConfigurationValue) (result fwtypes.ObjectValueOf[tableBucketMaintenanceConfigurationValueModel[icebergUnreferencedFileRemovalSettingsModel]], diags diag.Diagnostics) { iceberg, d := flattenIcebergUnreferencedFileRemovalSettings(ctx, in.Settings) diags.Append(d...) 
if diags.HasError() { return result, diags } - model := tableBucketMaintenanceConfigurationValueModel{ + model := tableBucketMaintenanceConfigurationValueModel[icebergUnreferencedFileRemovalSettingsModel]{ Settings: iceberg, Status: fwtypes.StringEnumValue(in.Status), } From 17704dd743ceb46cde7eb7d9bc939f7fb0a4d23e Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Mon, 25 Nov 2024 14:13:01 -0800 Subject: [PATCH 22/35] Adds resource `aws_s3tables_table_policy` --- internal/service/s3tables/exports_test.go | 2 + .../service/s3tables/service_package_gen.go | 4 + internal/service/s3tables/table_policy.go | 264 ++++++++++++++++++ .../service/s3tables/table_policy_test.go | 210 ++++++++++++++ website/docs/r/s3tables_table.html.markdown | 2 +- 5 files changed, 481 insertions(+), 1 deletion(-) create mode 100644 internal/service/s3tables/table_policy.go create mode 100644 internal/service/s3tables/table_policy_test.go diff --git a/internal/service/s3tables/exports_test.go b/internal/service/s3tables/exports_test.go index 4df1d54bbd76..73ad6a1f1b5d 100644 --- a/internal/service/s3tables/exports_test.go +++ b/internal/service/s3tables/exports_test.go @@ -8,11 +8,13 @@ var ( NewResourceTable = newResourceTable NewResourceTableBucket = newResourceTableBucket NewResourceTableBucketPolicy = newResourceTableBucketPolicy + ResourceTablePolicy = newResourceTablePolicy FindNamespace = findNamespace FindTable = findTable FindTableBucket = findTableBucket FindTableBucketPolicy = findTableBucketPolicy + FindTablePolicy = findTablePolicy TableIDFromTableARN = tableIDFromTableARN ) diff --git a/internal/service/s3tables/service_package_gen.go b/internal/service/s3tables/service_package_gen.go index 1d26460b5221..24e03012b5b5 100644 --- a/internal/service/s3tables/service_package_gen.go +++ b/internal/service/s3tables/service_package_gen.go @@ -36,6 +36,10 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.Servic Factory: newResourceTableBucketPolicy, Name: "Table Bucket Policy", }, + { + Factory: newResourceTablePolicy, + Name: "Table Policy", + }, } } diff --git a/internal/service/s3tables/table_policy.go b/internal/service/s3tables/table_policy.go new file mode 100644 index 000000000000..6e1c798e782f --- /dev/null +++ b/internal/service/s3tables/table_policy.go @@ -0,0 +1,264 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package s3tables + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3tables" + awstypes "github.com/aws/aws-sdk-go-v2/service/s3tables/types" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource("aws_s3tables_table_policy", name="Table Policy") +func newResourceTablePolicy(_ context.Context) (resource.ResourceWithConfigure, error) { + return &resourceTablePolicy{}, nil +} + +const ( + ResNameTablePolicy = "Table Policy" +) + +type resourceTablePolicy struct { + framework.ResourceWithConfigure +} + +func (r *resourceTablePolicy) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = "aws_s3tables_table_policy" +} + +func (r *resourceTablePolicy) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrName: schema.StringAttribute{ + Required: true, + Validators: tableNameValidator, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + names.AttrNamespace: schema.StringAttribute{ + Required: true, + Validators: namespaceNameValidator, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "resource_policy": schema.StringAttribute{ + CustomType: fwtypes.IAMPolicyType, + Required: true, + }, + "table_bucket_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + } +} + +func (r *resourceTablePolicy) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + conn := r.Meta().S3TablesClient(ctx) + + var plan resourceTablePolicyModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + return + } + + var input s3tables.PutTablePolicyInput + resp.Diagnostics.Append(flex.Expand(ctx, plan, &input)...) 
+ if resp.Diagnostics.HasError() { + return + } + + _, err := conn.PutTablePolicy(ctx, &input) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, ResNameTablePolicy, plan.Name.String(), err), + err.Error(), + ) + return + } + + out, err := findTablePolicy(ctx, conn, plan.TableBucketARN.ValueString(), plan.Namespace.ValueString(), plan.Name.ValueString()) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, ResNameTableBucketPolicy, plan.Name.String(), err), + err.Error(), + ) + return + } + + resp.Diagnostics.Append(flex.Flatten(ctx, out, &plan)...) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) +} + +func (r *resourceTablePolicy) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + conn := r.Meta().S3TablesClient(ctx) + + var state resourceTablePolicyModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return + } + + out, err := findTablePolicy(ctx, conn, state.TableBucketARN.ValueString(), state.Namespace.ValueString(), state.Name.ValueString()) + if tfresource.NotFound(err) { + resp.State.RemoveResource(ctx) + return + } + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionReading, ResNameTablePolicy, state.Name.String(), err), + err.Error(), + ) + return + } + + resp.Diagnostics.Append(flex.Flatten(ctx, out, &state)...) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) +} + +func (r *resourceTablePolicy) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + conn := r.Meta().S3TablesClient(ctx) + + var plan resourceTablePolicyModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + return + } + + var input s3tables.PutTablePolicyInput + resp.Diagnostics.Append(flex.Expand(ctx, plan, &input)...) + if resp.Diagnostics.HasError() { + return + } + + _, err := conn.PutTablePolicy(ctx, &input) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionUpdating, ResNameTablePolicy, plan.Name.String(), err), + err.Error(), + ) + return + } + + out, err := findTablePolicy(ctx, conn, plan.TableBucketARN.ValueString(), plan.Namespace.ValueString(), plan.Name.ValueString()) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, ResNameTableBucketPolicy, plan.Name.String(), err), + err.Error(), + ) + return + } + + resp.Diagnostics.Append(flex.Flatten(ctx, out, &plan)...) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) +} + +func (r *resourceTablePolicy) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + conn := r.Meta().S3TablesClient(ctx) + + // TIP: -- 2. Fetch the state + var state resourceTablePolicyModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + return + } + + input := s3tables.DeleteTablePolicyInput{ + Name: state.Name.ValueStringPointer(), + Namespace: state.Namespace.ValueStringPointer(), + TableBucketARN: state.TableBucketARN.ValueStringPointer(), + } + + _, err := conn.DeleteTablePolicy(ctx, &input) + if err != nil { + if errs.IsA[*awstypes.NotFoundException](err) { + return + } + + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.S3Tables, create.ErrActionDeleting, ResNameTablePolicy, state.Name.String(), err), + err.Error(), + ) + return + } +} + +func (r *resourceTablePolicy) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + identifier, err := parseTableIdentifier(req.ID) + if err != nil { + resp.Diagnostics.AddError( + "Invalid Import ID", + "Import IDs for S3 Tables Table Policies must use the format
"+tableIDSeparator+""+tableIDSeparator+"
.\n"+ + fmt.Sprintf("Had %q", req.ID), + ) + return + } + + identifier.PopulateState(ctx, &resp.State, &resp.Diagnostics) +} + +func findTablePolicy(ctx context.Context, conn *s3tables.Client, bucketARN, namespace, name string) (*s3tables.GetTablePolicyOutput, error) { + in := &s3tables.GetTablePolicyInput{ + Name: aws.String(name), + Namespace: aws.String(namespace), + TableBucketARN: aws.String(bucketARN), + } + + out, err := conn.GetTablePolicy(ctx, in) + if err != nil { + if errs.IsA[*awstypes.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: in, + } + } + + return nil, err + } + + if out == nil { + return nil, tfresource.NewEmptyResultError(in) + } + + return out, nil +} + +type resourceTablePolicyModel struct { + Name types.String `tfsdk:"name"` + Namespace types.String `tfsdk:"namespace"` + ResourcePolicy fwtypes.IAMPolicy `tfsdk:"resource_policy"` + TableBucketARN fwtypes.ARN `tfsdk:"table_bucket_arn"` +} diff --git a/internal/service/s3tables/table_policy_test.go b/internal/service/s3tables/table_policy_test.go new file mode 100644 index 000000000000..5f765edbca94 --- /dev/null +++ b/internal/service/s3tables/table_policy_test.go @@ -0,0 +1,210 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package s3tables_test + +import ( + "context" + "errors" + "fmt" + "strings" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/s3tables" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + tfs3tables "github.com/hashicorp/terraform-provider-aws/internal/service/s3tables" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccS3TablesTablePolicy_basic(t *testing.T) { + ctx := acctest.Context(t) + + var tablepolicy s3tables.GetTablePolicyOutput + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + namespace := strings.ReplaceAll(sdkacctest.RandomWithPrefix(acctest.ResourcePrefix), "-", "_") + rName := strings.ReplaceAll(sdkacctest.RandomWithPrefix(acctest.ResourcePrefix), "-", "_") + resourceName := "aws_s3tables_table_policy.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.S3TablesServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTablePolicyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTablePolicyConfig_basic(rName, namespace, bucketName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTablePolicyExists(ctx, resourceName, &tablepolicy), + resource.TestCheckResourceAttrSet(resourceName, "resource_policy"), + resource.TestCheckResourceAttrPair(resourceName, names.AttrName, "aws_s3tables_table.test", names.AttrName), + resource.TestCheckResourceAttrPair(resourceName, names.AttrNamespace, "aws_s3tables_table.test", names.AttrNamespace), + resource.TestCheckResourceAttrPair(resourceName, "table_bucket_arn", "aws_s3tables_table.test", "table_bucket_arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: 
testAccTablePolicyImportStateIdFunc(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrName, + ImportStateVerifyIgnore: []string{"resource_policy"}, + }, + }, + }) +} + +func TestAccS3TablesTablePolicy_disappears(t *testing.T) { + ctx := acctest.Context(t) + + var tablepolicy s3tables.GetTablePolicyOutput + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + namespace := strings.ReplaceAll(sdkacctest.RandomWithPrefix(acctest.ResourcePrefix), "-", "_") + rName := strings.ReplaceAll(sdkacctest.RandomWithPrefix(acctest.ResourcePrefix), "-", "_") + resourceName := "aws_s3tables_table_policy.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.S3TablesServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTablePolicyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTablePolicyConfig_basic(rName, namespace, bucketName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTablePolicyExists(ctx, resourceName, &tablepolicy), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfs3tables.ResourceTablePolicy, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckTablePolicyDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).S3TablesClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_s3tables_table_policy" { + continue + } + + _, err := tfs3tables.FindTablePolicy(ctx, conn, + rs.Primary.Attributes["table_bucket_arn"], + rs.Primary.Attributes[names.AttrNamespace], + rs.Primary.Attributes[names.AttrName], + ) + if tfresource.NotFound(err) { + return nil + } + if err != nil { + return create.Error(names.S3Tables, create.ErrActionCheckingDestroyed, tfs3tables.ResNameTablePolicy, rs.Primary.ID, err) + } + + return create.Error(names.S3Tables, create.ErrActionCheckingDestroyed, tfs3tables.ResNameTablePolicy, rs.Primary.ID, errors.New("not destroyed")) + } + + return nil + } +} + +func testAccCheckTablePolicyExists(ctx context.Context, name string, tablepolicy *s3tables.GetTablePolicyOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameTablePolicy, name, errors.New("not found")) + } + + if rs.Primary.Attributes["table_bucket_arn"] == "" || rs.Primary.Attributes[names.AttrNamespace] == "" || rs.Primary.Attributes[names.AttrName] == "" { + return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameTablePolicy, name, errors.New("not set")) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).S3TablesClient(ctx) + + resp, err := tfs3tables.FindTablePolicy(ctx, conn, + rs.Primary.Attributes["table_bucket_arn"], + rs.Primary.Attributes[names.AttrNamespace], + rs.Primary.Attributes[names.AttrName], + ) + if err != nil { + return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameTablePolicy, rs.Primary.ID, err) + } + + *tablepolicy = *resp + + return nil + } +} + +func testAccTablePolicyImportStateIdFunc(resourceName string) resource.ImportStateIdFunc { + return func(s *terraform.State) (string, error) { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + 
return "", fmt.Errorf("not found: %s", resourceName) + } + + identifier := tfs3tables.TableIdentifier{ + TableBucketARN: rs.Primary.Attributes["table_bucket_arn"], + Namespace: rs.Primary.Attributes[names.AttrNamespace], + Name: rs.Primary.Attributes[names.AttrName], + } + + return identifier.String(), nil + } +} + +func testAccTablePolicyConfig_basic(rName, namespace, bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3tables_table_policy" "test" { + resource_policy = data.aws_iam_policy_document.test.json + name = aws_s3tables_table.test.name + namespace = aws_s3tables_table.test.namespace + table_bucket_arn = aws_s3tables_table.test.table_bucket_arn +} + +data "aws_iam_policy_document" "test" { + statement { + actions = ["s3tables:*"] + principals { + type = "AWS" + identifiers = [data.aws_caller_identity.current.account_id] + } + resources = ["${aws_s3tables_table.test.arn}"] + } +} + +resource "aws_s3tables_table" "test" { + name = %[1]q + namespace = aws_s3tables_namespace.test.namespace[0] + table_bucket_arn = aws_s3tables_namespace.test.table_bucket_arn + format = "ICEBERG" +} + +resource "aws_s3tables_namespace" "test" { + namespace = [%[2]q] + table_bucket_arn = aws_s3tables_table_bucket.test.arn + + lifecycle { + create_before_destroy = true + } +} + +resource "aws_s3tables_table_bucket" "test" { + name = %[3]q +} + +data "aws_caller_identity" "current" {} +`, rName, namespace, bucketName) +} diff --git a/website/docs/r/s3tables_table.html.markdown b/website/docs/r/s3tables_table.html.markdown index d6af711a8e5f..1d7e64dcd03b 100644 --- a/website/docs/r/s3tables_table.html.markdown +++ b/website/docs/r/s3tables_table.html.markdown @@ -122,7 +122,7 @@ import { } ``` -Using `terraform import`, import S3 Tables Table using the `example_id_arg`. +Using `terraform import`, import S3 Tables Table using the `table_bucket_arn`, the value of `namespace`, and the value of `name`, separated by a semicolon (`;`). 
For example: ```console From 16da3e20720c45ef8b2c7efe91e2b58e00ad3914 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Mon, 25 Nov 2024 14:13:13 -0800 Subject: [PATCH 23/35] Cleanup --- .../service/s3tables/table_bucket_policy.go | 3 +- .../s3tables/table_bucket_policy_test.go | 7 +- internal/service/s3tables/table_policy.go | 1 - internal/service/s3tables/table_test.go | 20 ++--- .../r/s3tables_table_policy.html.markdown | 78 +++++++++++++++++++ 5 files changed, 91 insertions(+), 18 deletions(-) create mode 100644 website/docs/r/s3tables_table_policy.html.markdown diff --git a/internal/service/s3tables/table_bucket_policy.go b/internal/service/s3tables/table_bucket_policy.go index 7b754a695856..3292626d8849 100644 --- a/internal/service/s3tables/table_bucket_policy.go +++ b/internal/service/s3tables/table_bucket_policy.go @@ -35,7 +35,6 @@ const ( type resourceTableBucketPolicy struct { framework.ResourceWithConfigure - framework.WithTimeouts } func (r *resourceTableBucketPolicy) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { @@ -117,7 +116,7 @@ func (r *resourceTableBucketPolicy) Read(ctx context.Context, req resource.ReadR } if err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionSetting, ResNameTableBucketPolicy, state.TableBucketARN.String(), err), + create.ProblemStandardMessage(names.S3Tables, create.ErrActionReading, ResNameTableBucketPolicy, state.TableBucketARN.String(), err), err.Error(), ) return diff --git a/internal/service/s3tables/table_bucket_policy_test.go b/internal/service/s3tables/table_bucket_policy_test.go index 573b3827e683..5e08ba3b5aea 100644 --- a/internal/service/s3tables/table_bucket_policy_test.go +++ b/internal/service/s3tables/table_bucket_policy_test.go @@ -39,7 +39,7 @@ func TestAccS3TablesTableBucketPolicy_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccTableBucketPolicyConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckTableBucketPolicyExists(ctx, resourceName, &tablebucketpolicy), resource.TestCheckResourceAttrSet(resourceName, "resource_policy"), resource.TestCheckResourceAttrPair(resourceName, "table_bucket_arn", "aws_s3tables_table_bucket.test", names.AttrARN), @@ -59,9 +59,6 @@ func TestAccS3TablesTableBucketPolicy_basic(t *testing.T) { func TestAccS3TablesTableBucketPolicy_disappears(t *testing.T) { ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } var tablebucketpolicy s3tables.GetTableBucketPolicyOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -78,7 +75,7 @@ func TestAccS3TablesTableBucketPolicy_disappears(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccTableBucketPolicyConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckTableBucketPolicyExists(ctx, resourceName, &tablebucketpolicy), acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfs3tables.NewResourceTableBucketPolicy, resourceName), ), diff --git a/internal/service/s3tables/table_policy.go b/internal/service/s3tables/table_policy.go index 6e1c798e782f..b4b90dfd1eec 100644 --- a/internal/service/s3tables/table_policy.go +++ b/internal/service/s3tables/table_policy.go @@ -189,7 +189,6 @@ func (r *resourceTablePolicy) Update(ctx context.Context, req resource.UpdateReq func (r *resourceTablePolicy) Delete(ctx context.Context, req 
resource.DeleteRequest, resp *resource.DeleteResponse) { conn := r.Meta().S3TablesClient(ctx) - // TIP: -- 2. Fetch the state var state resourceTablePolicyModel resp.Diagnostics.Append(req.State.Get(ctx, &state)...) if resp.Diagnostics.HasError() { diff --git a/internal/service/s3tables/table_test.go b/internal/service/s3tables/table_test.go index 7469c4fbb69b..4fef63b86005 100644 --- a/internal/service/s3tables/table_test.go +++ b/internal/service/s3tables/table_test.go @@ -124,7 +124,7 @@ func TestAccS3TablesTable_disappears(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccTableConfig_basic(rName, namespace, bucketName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckTableExists(ctx, resourceName, &table), acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfs3tables.NewResourceTable, resourceName), ), @@ -162,7 +162,7 @@ func TestAccS3TablesTable_rename(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccTableConfig_basic(rName, namespace, bucketName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckTableExists(ctx, resourceName, &table), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), ), @@ -177,7 +177,7 @@ func TestAccS3TablesTable_rename(t *testing.T) { }, { Config: testAccTableConfig_basic(rNameUpdated, namespace, bucketName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckTableExists(ctx, resourceName, &table), resource.TestCheckResourceAttr(resourceName, names.AttrName, rNameUpdated), resource.TestCheckResourceAttrSet(resourceName, "modified_at"), @@ -243,7 +243,7 @@ func TestAccS3TablesTable_updateNamespace(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccTableConfig_basic(rName, namespace, bucketName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckTableExists(ctx, resourceName, &table), resource.TestCheckResourceAttr(resourceName, names.AttrNamespace, namespace), ), @@ -258,7 +258,7 @@ func TestAccS3TablesTable_updateNamespace(t *testing.T) { }, { Config: testAccTableConfig_basic(rName, namespaceUpdated, bucketName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckTableExists(ctx, resourceName, &table), resource.TestCheckResourceAttr(resourceName, names.AttrNamespace, namespaceUpdated), resource.TestCheckResourceAttrSet(resourceName, "modified_at"), @@ -325,7 +325,7 @@ func TestAccS3TablesTable_updateNameAndNamespace(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccTableConfig_basic(rName, namespace, bucketName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckTableExists(ctx, resourceName, &table), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttr(resourceName, names.AttrNamespace, namespace), @@ -341,7 +341,7 @@ func TestAccS3TablesTable_updateNameAndNamespace(t *testing.T) { }, { Config: testAccTableConfig_basic(rNameUpdated, namespaceUpdated, bucketName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckTableExists(ctx, resourceName, &table), resource.TestCheckResourceAttr(resourceName, names.AttrName, rNameUpdated), resource.TestCheckResourceAttr(resourceName, names.AttrNamespace, namespaceUpdated), @@ -498,7 +498,7 @@ func testAccCheckTableExists(ctx context.Context, name string, 
table *s3tables.G return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameTable, name, errors.New("not found")) } - if rs.Primary.Attributes["table_bucket_arn"] == "" || rs.Primary.Attributes[names.AttrName] == "" { + if rs.Primary.Attributes["table_bucket_arn"] == "" || rs.Primary.Attributes[names.AttrNamespace] == "" || rs.Primary.Attributes[names.AttrName] == "" { return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameTable, name, errors.New("not set")) } @@ -571,14 +571,14 @@ resource "aws_s3tables_table" "test" { maintenance_configuration = { iceberg_compaction = { settings = { - target_file_size_mb = %[4]d + target_file_size_mb = %[4]d } status = "enabled" } iceberg_snapshot_management = { settings = { max_snapshot_age_hours = %[5]d - min_snapshots_to_keep = %[6]d + min_snapshots_to_keep = %[6]d } status = "enabled" } diff --git a/website/docs/r/s3tables_table_policy.html.markdown b/website/docs/r/s3tables_table_policy.html.markdown new file mode 100644 index 000000000000..3dcc162b55f8 --- /dev/null +++ b/website/docs/r/s3tables_table_policy.html.markdown @@ -0,0 +1,78 @@ +--- +subcategory: "S3 Tables" +layout: "aws" +page_title: "AWS: aws_s3tables_table_policy" +description: |- + Terraform resource for managing an AWS S3 Tables Table Policy. +--- + +# Resource: aws_s3tables_table_policy + +Terraform resource for managing an AWS S3 Tables Table Policy. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_s3tables_table_policy" "example" { + resource_policy = data.aws_iam_policy_document.example.json + name = aws_s3tables_table.test.name + namespace = aws_s3tables_table.test.namespace + table_bucket_arn = aws_s3tables_table.test.table_bucket_arn +} + +data "aws_iam_policy_document" "example" { + statement { + # ... + } +} + +resource "aws_s3tables_table" "example" { + name = "example-table" + namespace = aws_s3tables_namespace.example + table_bucket_arn = aws_s3tables_namespace.example.table_bucket_arn + format = "ICEBERG" +} + +resource "aws_s3tables_namespace" "example" { + namespace = ["example-namespace"] + table_bucket_arn = aws_s3tables_table_bucket.example.arn +} + +resource "aws_s3tables_table_bucket" "example" { + name = "example-bucket" +} +``` + +## Argument Reference + +The following arguments are required: + +* `resource_policy` - (Required) Amazon Web Services resource-based policy document in JSON format. +* `name` - (Required, Forces new resource) Name of the table. + Must be between 1 and 255 characters in length. + Can consist of lowercase letters, numbers, and underscores, and must begin and end with a lowercase letter or number. +* `namespace` - (Required, Forces new resource) Name of the namespace for this table. + Must be between 1 and 255 characters in length. + Can consist of lowercase letters, numbers, and underscores, and must begin and end with a lowercase letter or number. +* `table_bucket_arn` - (Required, Forces new resource) ARN referencing the Table Bucket that contains this Namespace. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 Tables Table Policy using the `table_bucket_arn`, the value of `namespace`, and the value of `name`, separated by a semicolon (`;`). 
+For example: + +```terraform +import { + to = aws_s3tables_table_policy.example + id = "arn:aws:s3tables:us-west-2:123456789012:bucket/example-bucket;example-namespace;example-table" +} +``` + +Using `terraform import`, import S3 Tables Table Policy using the `table_bucket_arn`, the value of `namespace`, and the value of `name`, separated by a semicolon (`;`). +For example: + +```console +% terraform import aws_s3tables_table_policy.example 'arn:aws:s3tables:us-west-2:123456789012:bucket/example-bucket;example-namespace;example-table' +``` From c80971faa83a42d96b4262c7fc93aee0637941d5 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Mon, 25 Nov 2024 14:49:46 -0800 Subject: [PATCH 24/35] Excludes flex functions from Semgrep rule --- internal/service/s3tables/table.go | 18 +++++++++--------- internal/service/s3tables/table_bucket.go | 10 +++++----- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/internal/service/s3tables/table.go b/internal/service/s3tables/table.go index 8025eadee067..e6e9dd0d5c96 100644 --- a/internal/service/s3tables/table.go +++ b/internal/service/s3tables/table.go @@ -553,7 +553,7 @@ type icebergSnapshotManagementSettingsModel struct { MinSnapshotsToKeep types.Int32 `tfsdk:"min_snapshots_to_keep"` } -func flattenTableMaintenanceConfiguration(ctx context.Context, in *s3tables.GetTableMaintenanceConfigurationOutput) (result fwtypes.ObjectValueOf[tableMaintenanceConfigurationModel], diags diag.Diagnostics) { +func flattenTableMaintenanceConfiguration(ctx context.Context, in *s3tables.GetTableMaintenanceConfigurationOutput) (result fwtypes.ObjectValueOf[tableMaintenanceConfigurationModel], diags diag.Diagnostics) { // nosemgrep:ci.semgrep.framework.manual-flattener-functions compactionConfig := in.Configuration[string(awstypes.TableMaintenanceTypeIcebergCompaction)] compactionConfigModel, d := flattenTableMaintenanceIcebergCompaction(ctx, &compactionConfig) diags.Append(d...) @@ -578,7 +578,7 @@ func flattenTableMaintenanceConfiguration(ctx context.Context, in *s3tables.GetT return result, diags } -func expandTableMaintenanceIcebergCompaction(ctx context.Context, in fwtypes.ObjectValueOf[tableMaintenanceConfigurationValueModel[icebergCompactionSettingsModel]]) (result awstypes.TableMaintenanceConfigurationValue, diags diag.Diagnostics) { +func expandTableMaintenanceIcebergCompaction(ctx context.Context, in fwtypes.ObjectValueOf[tableMaintenanceConfigurationValueModel[icebergCompactionSettingsModel]]) (result awstypes.TableMaintenanceConfigurationValue, diags diag.Diagnostics) { // nosemgrep:ci.semgrep.framework.manual-expander-functions model, d := in.ToPtr(ctx) diags.Append(d...) if diags.HasError() { @@ -597,7 +597,7 @@ func expandTableMaintenanceIcebergCompaction(ctx context.Context, in fwtypes.Obj return result, diags } -func flattenTableMaintenanceIcebergCompaction(ctx context.Context, in *awstypes.TableMaintenanceConfigurationValue) (result fwtypes.ObjectValueOf[tableMaintenanceConfigurationValueModel[icebergCompactionSettingsModel]], diags diag.Diagnostics) { +func flattenTableMaintenanceIcebergCompaction(ctx context.Context, in *awstypes.TableMaintenanceConfigurationValue) (result fwtypes.ObjectValueOf[tableMaintenanceConfigurationValueModel[icebergCompactionSettingsModel]], diags diag.Diagnostics) { // nosemgrep:ci.semgrep.framework.manual-flattener-functions iceberg, d := flattenIcebergCompactionSettings(ctx, in.Settings) diags.Append(d...) 
if diags.HasError() { @@ -614,7 +614,7 @@ func flattenTableMaintenanceIcebergCompaction(ctx context.Context, in *awstypes. return result, diags } -func expandIcebergCompactionSettings(ctx context.Context, in fwtypes.ObjectValueOf[icebergCompactionSettingsModel]) (result *awstypes.TableMaintenanceSettingsMemberIcebergCompaction, diags diag.Diagnostics) { +func expandIcebergCompactionSettings(ctx context.Context, in fwtypes.ObjectValueOf[icebergCompactionSettingsModel]) (result *awstypes.TableMaintenanceSettingsMemberIcebergCompaction, diags diag.Diagnostics) { // nosemgrep:ci.semgrep.framework.manual-expander-functions model, d := in.ToPtr(ctx) diags.Append(d...) if diags.HasError() { @@ -630,7 +630,7 @@ func expandIcebergCompactionSettings(ctx context.Context, in fwtypes.ObjectValue }, diags } -func flattenIcebergCompactionSettings(ctx context.Context, in awstypes.TableMaintenanceSettings) (result fwtypes.ObjectValueOf[icebergCompactionSettingsModel], diags diag.Diagnostics) { +func flattenIcebergCompactionSettings(ctx context.Context, in awstypes.TableMaintenanceSettings) (result fwtypes.ObjectValueOf[icebergCompactionSettingsModel], diags diag.Diagnostics) { // nosemgrep:ci.semgrep.framework.manual-flattener-functions switch t := in.(type) { case *awstypes.TableMaintenanceSettingsMemberIcebergCompaction: var model icebergCompactionSettingsModel @@ -648,7 +648,7 @@ func flattenIcebergCompactionSettings(ctx context.Context, in awstypes.TableMain return result, diags } -func expandTableMaintenanceIcebergSnapshotManagement(ctx context.Context, in fwtypes.ObjectValueOf[tableMaintenanceConfigurationValueModel[icebergSnapshotManagementSettingsModel]]) (result awstypes.TableMaintenanceConfigurationValue, diags diag.Diagnostics) { +func expandTableMaintenanceIcebergSnapshotManagement(ctx context.Context, in fwtypes.ObjectValueOf[tableMaintenanceConfigurationValueModel[icebergSnapshotManagementSettingsModel]]) (result awstypes.TableMaintenanceConfigurationValue, diags diag.Diagnostics) { // nosemgrep:ci.semgrep.framework.manual-expander-functions model, d := in.ToPtr(ctx) diags.Append(d...) if diags.HasError() { @@ -667,7 +667,7 @@ func expandTableMaintenanceIcebergSnapshotManagement(ctx context.Context, in fwt return result, diags } -func flattenTableMaintenanceIcebergSnapshotManagement(ctx context.Context, in *awstypes.TableMaintenanceConfigurationValue) (result fwtypes.ObjectValueOf[tableMaintenanceConfigurationValueModel[icebergSnapshotManagementSettingsModel]], diags diag.Diagnostics) { +func flattenTableMaintenanceIcebergSnapshotManagement(ctx context.Context, in *awstypes.TableMaintenanceConfigurationValue) (result fwtypes.ObjectValueOf[tableMaintenanceConfigurationValueModel[icebergSnapshotManagementSettingsModel]], diags diag.Diagnostics) { // nosemgrep:ci.semgrep.framework.manual-flattener-functions iceberg, d := flattenIcebergSnapshotManagementSettings(ctx, in.Settings) diags.Append(d...) 
if diags.HasError() { @@ -684,7 +684,7 @@ func flattenTableMaintenanceIcebergSnapshotManagement(ctx context.Context, in *a return result, diags } -func expandIcebergSnapshotManagementSettings(ctx context.Context, in fwtypes.ObjectValueOf[icebergSnapshotManagementSettingsModel]) (result *awstypes.TableMaintenanceSettingsMemberIcebergSnapshotManagement, diags diag.Diagnostics) { +func expandIcebergSnapshotManagementSettings(ctx context.Context, in fwtypes.ObjectValueOf[icebergSnapshotManagementSettingsModel]) (result *awstypes.TableMaintenanceSettingsMemberIcebergSnapshotManagement, diags diag.Diagnostics) { // nosemgrep:ci.semgrep.framework.manual-expander-functions model, d := in.ToPtr(ctx) diags.Append(d...) if diags.HasError() { @@ -700,7 +700,7 @@ func expandIcebergSnapshotManagementSettings(ctx context.Context, in fwtypes.Obj }, diags } -func flattenIcebergSnapshotManagementSettings(ctx context.Context, in awstypes.TableMaintenanceSettings) (result fwtypes.ObjectValueOf[icebergSnapshotManagementSettingsModel], diags diag.Diagnostics) { +func flattenIcebergSnapshotManagementSettings(ctx context.Context, in awstypes.TableMaintenanceSettings) (result fwtypes.ObjectValueOf[icebergSnapshotManagementSettingsModel], diags diag.Diagnostics) { // nosemgrep:ci.semgrep.framework.manual-flattener-functions switch t := in.(type) { case *awstypes.TableMaintenanceSettingsMemberIcebergSnapshotManagement: var model icebergSnapshotManagementSettingsModel diff --git a/internal/service/s3tables/table_bucket.go b/internal/service/s3tables/table_bucket.go index 362ff5b09ac8..465acba923bc 100644 --- a/internal/service/s3tables/table_bucket.go +++ b/internal/service/s3tables/table_bucket.go @@ -388,7 +388,7 @@ type icebergUnreferencedFileRemovalSettingsModel struct { UnreferencedDays types.Int32 `tfsdk:"unreferenced_days"` } -func flattenTableBucketMaintenanceConfiguration(ctx context.Context, in *s3tables.GetTableBucketMaintenanceConfigurationOutput) (result fwtypes.ObjectValueOf[tableBucketMaintenanceConfigurationModel], diags diag.Diagnostics) { +func flattenTableBucketMaintenanceConfiguration(ctx context.Context, in *s3tables.GetTableBucketMaintenanceConfigurationOutput) (result fwtypes.ObjectValueOf[tableBucketMaintenanceConfigurationModel], diags diag.Diagnostics) { // nosemgrep:ci.semgrep.framework.manual-flattener-functions unreferencedFileRemovalConfig := in.Configuration[string(awstypes.TableBucketMaintenanceTypeIcebergUnreferencedFileRemoval)] unreferencedFileRemovalConfigModel, d := flattenTableBucketMaintenanceIcebergUnreferencedFileRemoval(ctx, &unreferencedFileRemovalConfig) diags.Append(d...) @@ -405,7 +405,7 @@ func flattenTableBucketMaintenanceConfiguration(ctx context.Context, in *s3table return result, diags } -func expandTableBucketMaintenanceIcebergUnreferencedFileRemoval(ctx context.Context, in fwtypes.ObjectValueOf[tableBucketMaintenanceConfigurationValueModel[icebergUnreferencedFileRemovalSettingsModel]]) (result awstypes.TableBucketMaintenanceConfigurationValue, diags diag.Diagnostics) { +func expandTableBucketMaintenanceIcebergUnreferencedFileRemoval(ctx context.Context, in fwtypes.ObjectValueOf[tableBucketMaintenanceConfigurationValueModel[icebergUnreferencedFileRemovalSettingsModel]]) (result awstypes.TableBucketMaintenanceConfigurationValue, diags diag.Diagnostics) { // nosemgrep:ci.semgrep.framework.manual-expander-functions model, d := in.ToPtr(ctx) diags.Append(d...) 
if diags.HasError() { @@ -424,7 +424,7 @@ func expandTableBucketMaintenanceIcebergUnreferencedFileRemoval(ctx context.Cont return result, diags } -func flattenTableBucketMaintenanceIcebergUnreferencedFileRemoval(ctx context.Context, in *awstypes.TableBucketMaintenanceConfigurationValue) (result fwtypes.ObjectValueOf[tableBucketMaintenanceConfigurationValueModel[icebergUnreferencedFileRemovalSettingsModel]], diags diag.Diagnostics) { +func flattenTableBucketMaintenanceIcebergUnreferencedFileRemoval(ctx context.Context, in *awstypes.TableBucketMaintenanceConfigurationValue) (result fwtypes.ObjectValueOf[tableBucketMaintenanceConfigurationValueModel[icebergUnreferencedFileRemovalSettingsModel]], diags diag.Diagnostics) { // nosemgrep:ci.semgrep.framework.manual-flattener-functions iceberg, d := flattenIcebergUnreferencedFileRemovalSettings(ctx, in.Settings) diags.Append(d...) if diags.HasError() { @@ -441,7 +441,7 @@ func flattenTableBucketMaintenanceIcebergUnreferencedFileRemoval(ctx context.Con return result, diags } -func expandIcebergUnreferencedFileRemovalSettings(ctx context.Context, in fwtypes.ObjectValueOf[icebergUnreferencedFileRemovalSettingsModel]) (result *awstypes.TableBucketMaintenanceSettingsMemberIcebergUnreferencedFileRemoval, diags diag.Diagnostics) { +func expandIcebergUnreferencedFileRemovalSettings(ctx context.Context, in fwtypes.ObjectValueOf[icebergUnreferencedFileRemovalSettingsModel]) (result *awstypes.TableBucketMaintenanceSettingsMemberIcebergUnreferencedFileRemoval, diags diag.Diagnostics) { // nosemgrep:ci.semgrep.framework.manual-expander-functions model, d := in.ToPtr(ctx) diags.Append(d...) if diags.HasError() { @@ -457,7 +457,7 @@ func expandIcebergUnreferencedFileRemovalSettings(ctx context.Context, in fwtype }, diags } -func flattenIcebergUnreferencedFileRemovalSettings(ctx context.Context, in awstypes.TableBucketMaintenanceSettings) (result fwtypes.ObjectValueOf[icebergUnreferencedFileRemovalSettingsModel], diags diag.Diagnostics) { +func flattenIcebergUnreferencedFileRemovalSettings(ctx context.Context, in awstypes.TableBucketMaintenanceSettings) (result fwtypes.ObjectValueOf[icebergUnreferencedFileRemovalSettingsModel], diags diag.Diagnostics) { // nosemgrep:ci.semgrep.framework.manual-flattener-functions switch t := in.(type) { case *awstypes.TableBucketMaintenanceSettingsMemberIcebergUnreferencedFileRemoval: var model icebergUnreferencedFileRemovalSettingsModel From a2c50a7f4f5c393ab75061b85a0e1554683f317b Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Mon, 25 Nov 2024 16:12:27 -0800 Subject: [PATCH 25/35] Changes `aws_s3tables_namespace` `namespace` from list to string --- internal/service/s3tables/namespace.go | 46 ++++++------------- internal/service/s3tables/namespace_test.go | 21 ++++----- .../service/s3tables/table_policy_test.go | 4 +- internal/service/s3tables/table_test.go | 10 ++-- .../docs/r/s3tables_namespace.html.markdown | 3 +- 5 files changed, 33 insertions(+), 51 deletions(-) diff --git a/internal/service/s3tables/namespace.go b/internal/service/s3tables/namespace.go index 0924d3703fe2..0dd6cccfe5ed 100644 --- a/internal/service/s3tables/namespace.go +++ b/internal/service/s3tables/namespace.go @@ -13,13 +13,11 @@ import ( "github.com/aws/aws-sdk-go-v2/service/s3tables" awstypes "github.com/aws/aws-sdk-go-v2/service/s3tables/types" "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" - "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" 
"github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" - "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" @@ -69,20 +67,12 @@ func (r *resourceNamespace) Schema(ctx context.Context, req resource.SchemaReque stringplanmodifier.UseStateForUnknown(), }, }, - names.AttrNamespace: schema.ListAttribute{ - CustomType: fwtypes.ListOfStringType, - ElementType: types.StringType, - Required: true, - PlanModifiers: []planmodifier.List{ - listplanmodifier.RequiresReplace(), - }, - Validators: []validator.List{ - listvalidator.SizeAtLeast(1), - listvalidator.SizeAtMost(1), - listvalidator.ValueStringsAre( - namespaceNameValidator..., - ), + names.AttrNamespace: schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), }, + Validators: namespaceNameValidator, }, names.AttrOwnerAccountID: schema.StringAttribute{ Computed: true, @@ -115,6 +105,7 @@ func (r *resourceNamespace) Create(ctx context.Context, req resource.CreateReque if resp.Diagnostics.HasError() { return } + input.Namespace = []string{plan.Namespace.ValueString()} out, err := conn.CreateNamespace(ctx, &input) if err != nil { @@ -145,6 +136,7 @@ func (r *resourceNamespace) Create(ctx context.Context, req resource.CreateReque if resp.Diagnostics.HasError() { return } + plan.Namespace = types.StringValue(out.Namespace[0]) resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) 
} @@ -158,11 +150,7 @@ func (r *resourceNamespace) Read(ctx context.Context, req resource.ReadRequest, return } - var elements []string - state.Namespace.ElementsAs(ctx, &elements, false) - namespace := elements[0] - - out, err := findNamespace(ctx, conn, state.TableBucketARN.ValueString(), namespace) + out, err := findNamespace(ctx, conn, state.TableBucketARN.ValueString(), state.Namespace.ValueString()) if tfresource.NotFound(err) { resp.State.RemoveResource(ctx) return @@ -192,12 +180,8 @@ func (r *resourceNamespace) Delete(ctx context.Context, req resource.DeleteReque return } - var elements []string - state.Namespace.ElementsAs(ctx, &elements, false) - namespace := elements[0] - input := s3tables.DeleteNamespaceInput{ - Namespace: aws.String(namespace), + Namespace: state.Namespace.ValueStringPointer(), TableBucketARN: state.TableBucketARN.ValueStringPointer(), } @@ -255,11 +239,11 @@ func findNamespace(ctx context.Context, conn *s3tables.Client, bucketARN, name s } type resourceNamespaceModel struct { - CreatedAt timetypes.RFC3339 `tfsdk:"created_at"` - CreatedBy types.String `tfsdk:"created_by"` - Namespace fwtypes.ListValueOf[types.String] `tfsdk:"namespace"` - OwnerAccountID types.String `tfsdk:"owner_account_id"` - TableBucketARN fwtypes.ARN `tfsdk:"table_bucket_arn"` + CreatedAt timetypes.RFC3339 `tfsdk:"created_at"` + CreatedBy types.String `tfsdk:"created_by"` + Namespace types.String `tfsdk:"namespace" autoflex:"-"` + OwnerAccountID types.String `tfsdk:"owner_account_id"` + TableBucketARN fwtypes.ARN `tfsdk:"table_bucket_arn"` } var namespaceNameValidator = []validator.String{ @@ -303,5 +287,5 @@ func (id namespaceIdentifier) String() string { func (id namespaceIdentifier) PopulateState(ctx context.Context, s *tfsdk.State, diags *diag.Diagnostics) { diags.Append(s.SetAttribute(ctx, path.Root("table_bucket_arn"), id.TableBucketARN)...) - diags.Append(s.SetAttribute(ctx, path.Root(names.AttrNamespace), []string{id.Namespace})...) + diags.Append(s.SetAttribute(ctx, path.Root(names.AttrNamespace), id.Namespace)...) 
} diff --git a/internal/service/s3tables/namespace_test.go b/internal/service/s3tables/namespace_test.go index d46f30f47356..e570e9b378da 100644 --- a/internal/service/s3tables/namespace_test.go +++ b/internal/service/s3tables/namespace_test.go @@ -48,8 +48,7 @@ func TestAccS3TablesNamespace_basic(t *testing.T) { testAccCheckNamespaceExists(ctx, resourceName, &namespace), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreatedAt), acctest.CheckResourceAttrAccountID(ctx, resourceName, "created_by"), - resource.TestCheckResourceAttr(resourceName, "namespace.#", "1"), - resource.TestCheckResourceAttr(resourceName, "namespace.0", rName), + resource.TestCheckResourceAttr(resourceName, names.AttrNamespace, rName), acctest.CheckResourceAttrAccountID(ctx, resourceName, names.AttrOwnerAccountID), resource.TestCheckResourceAttrPair(resourceName, "table_bucket_arn", "aws_s3tables_table_bucket.test", names.AttrARN), ), @@ -59,7 +58,7 @@ func TestAccS3TablesNamespace_basic(t *testing.T) { ImportState: true, ImportStateIdFunc: testAccNamespaceImportStateIdFunc(resourceName), ImportStateVerify: true, - ImportStateVerifyIdentifierAttribute: "namespace.0", + ImportStateVerifyIdentifierAttribute: names.AttrNamespace, }, }, }) @@ -103,7 +102,7 @@ func testAccCheckNamespaceDestroy(ctx context.Context) resource.TestCheckFunc { continue } - _, err := tfs3tables.FindNamespace(ctx, conn, rs.Primary.Attributes["table_bucket_arn"], rs.Primary.Attributes["namespace.0"]) + _, err := tfs3tables.FindNamespace(ctx, conn, rs.Primary.Attributes["table_bucket_arn"], rs.Primary.Attributes[names.AttrNamespace]) if tfresource.NotFound(err) { return nil } @@ -125,13 +124,13 @@ func testAccCheckNamespaceExists(ctx context.Context, name string, namespace *s3 return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameNamespace, name, errors.New("not found")) } - if rs.Primary.Attributes["table_bucket_arn"] == "" || rs.Primary.Attributes["namespace.0"] == "" { + if rs.Primary.Attributes["table_bucket_arn"] == "" || rs.Primary.Attributes[names.AttrNamespace] == "" { return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameNamespace, name, errors.New("not set")) } conn := acctest.Provider.Meta().(*conns.AWSClient).S3TablesClient(ctx) - resp, err := tfs3tables.FindNamespace(ctx, conn, rs.Primary.Attributes["table_bucket_arn"], rs.Primary.Attributes["namespace.0"]) + resp, err := tfs3tables.FindNamespace(ctx, conn, rs.Primary.Attributes["table_bucket_arn"], rs.Primary.Attributes[names.AttrNamespace]) if err != nil { return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameNamespace, rs.Primary.ID, err) } @@ -149,17 +148,17 @@ func testAccNamespaceImportStateIdFunc(resourceName string) resource.ImportState return "", fmt.Errorf("not found: %s", resourceName) } - return rs.Primary.Attributes["table_bucket_arn"] + tfs3tables.NamespaceIDSeparator + rs.Primary.Attributes["namespace.0"], nil + return rs.Primary.Attributes["table_bucket_arn"] + tfs3tables.NamespaceIDSeparator + rs.Primary.Attributes[names.AttrNamespace], nil } } func namespaceDisappearsStateFunc(ctx context.Context, state *tfsdk.State, is *terraform.InstanceState) error { - v, ok := is.Attributes["namespace.0"] + v, ok := is.Attributes[names.AttrNamespace] if !ok { - return errors.New(`Identifying attribute "namespace.0" not defined`) + return errors.New(`Identifying attribute "namespace" not defined`) } - if err := fwdiag.DiagnosticsError(state.SetAttribute(ctx, 
path.Root(names.AttrNamespace), []string{v})); err != nil { + if err := fwdiag.DiagnosticsError(state.SetAttribute(ctx, path.Root(names.AttrNamespace), v)); err != nil { return err } @@ -178,7 +177,7 @@ func namespaceDisappearsStateFunc(ctx context.Context, state *tfsdk.State, is *t func testAccNamespaceConfig_basic(rName, bucketName string) string { return fmt.Sprintf(` resource "aws_s3tables_namespace" "test" { - namespace = [%[1]q] + namespace = %[1]q table_bucket_arn = aws_s3tables_table_bucket.test.arn } diff --git a/internal/service/s3tables/table_policy_test.go b/internal/service/s3tables/table_policy_test.go index 5f765edbca94..90a62441d4ed 100644 --- a/internal/service/s3tables/table_policy_test.go +++ b/internal/service/s3tables/table_policy_test.go @@ -187,13 +187,13 @@ data "aws_iam_policy_document" "test" { resource "aws_s3tables_table" "test" { name = %[1]q - namespace = aws_s3tables_namespace.test.namespace[0] + namespace = aws_s3tables_namespace.test.namespace table_bucket_arn = aws_s3tables_namespace.test.table_bucket_arn format = "ICEBERG" } resource "aws_s3tables_namespace" "test" { - namespace = [%[2]q] + namespace = %[2]q table_bucket_arn = aws_s3tables_table_bucket.test.arn lifecycle { diff --git a/internal/service/s3tables/table_test.go b/internal/service/s3tables/table_test.go index 4fef63b86005..c54fb682b88e 100644 --- a/internal/service/s3tables/table_test.go +++ b/internal/service/s3tables/table_test.go @@ -62,7 +62,7 @@ func TestAccS3TablesTable_basic(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, "modified_at", resourceName, names.AttrCreatedAt), resource.TestCheckNoResourceAttr(resourceName, "modified_by"), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), - resource.TestCheckResourceAttrPair(resourceName, names.AttrNamespace, "aws_s3tables_namespace.test", "namespace.0"), + resource.TestCheckResourceAttrPair(resourceName, names.AttrNamespace, "aws_s3tables_namespace.test", names.AttrNamespace), acctest.CheckResourceAttrAccountID(ctx, resourceName, names.AttrOwnerAccountID), resource.TestCheckResourceAttrPair(resourceName, "table_bucket_arn", "aws_s3tables_table_bucket.test", names.AttrARN), resource.TestCheckResourceAttr(resourceName, names.AttrType, string(awstypes.TableTypeCustomer)), @@ -540,13 +540,13 @@ func testAccTableConfig_basic(rName, namespace, bucketName string) string { return fmt.Sprintf(` resource "aws_s3tables_table" "test" { name = %[1]q - namespace = aws_s3tables_namespace.test.namespace[0] + namespace = aws_s3tables_namespace.test.namespace table_bucket_arn = aws_s3tables_namespace.test.table_bucket_arn format = "ICEBERG" } resource "aws_s3tables_namespace" "test" { - namespace = [%[2]q] + namespace = %[2]q table_bucket_arn = aws_s3tables_table_bucket.test.arn lifecycle { @@ -564,7 +564,7 @@ func testAccTableConfig_maintenanceConfiguration(rName, namespace, bucketName st return fmt.Sprintf(` resource "aws_s3tables_table" "test" { name = %[1]q - namespace = aws_s3tables_namespace.test.namespace[0] + namespace = aws_s3tables_namespace.test.namespace table_bucket_arn = aws_s3tables_namespace.test.table_bucket_arn format = "ICEBERG" @@ -586,7 +586,7 @@ resource "aws_s3tables_table" "test" { } resource "aws_s3tables_namespace" "test" { - namespace = [%[2]q] + namespace = %[2]q table_bucket_arn = aws_s3tables_table_bucket.test.arn lifecycle { diff --git a/website/docs/r/s3tables_namespace.html.markdown b/website/docs/r/s3tables_namespace.html.markdown index 48b3193bdbb0..9f8ba73f4512 100644 --- 
a/website/docs/r/s3tables_namespace.html.markdown +++ b/website/docs/r/s3tables_namespace.html.markdown @@ -16,7 +16,7 @@ Terraform resource for managing an AWS S3 Tables Namespace. ```terraform resource "aws_s3tables_namespace" "example" { - namespace = ["example-namespace"] + namespace = "example-namespace" table_bucket_arn = aws_s3tables_table_bucket.example.arn } @@ -30,7 +30,6 @@ resource "aws_s3tables_table_bucket" "example" { The following arguments are required: * `namespace` - (Required, Forces new resource) Name of the namespace. - Note that this is a list with a maximum size of 1. Must be between 1 and 255 characters in length. Can consist of lowercase letters, numbers, and underscores, and must begin and end with a lowercase letter or number. * `table_bucket_arn` - (Required, Forces new resource) ARN referencing the Table Bucket that contains this Namespace. From 709e4d8d120ae1c400287a82e805fad9232d9ca1 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 26 Nov 2024 10:48:53 -0800 Subject: [PATCH 26/35] Documentation cleanup --- website/docs/r/s3tables_table.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/s3tables_table.html.markdown b/website/docs/r/s3tables_table.html.markdown index 1d7e64dcd03b..e443c25f22e1 100644 --- a/website/docs/r/s3tables_table.html.markdown +++ b/website/docs/r/s3tables_table.html.markdown @@ -23,7 +23,7 @@ resource "aws_s3tables_table" "example" { } resource "aws_s3tables_namespace" "example" { - namespace = ["example-namespace"] + namespace = "example-namespace" table_bucket_arn = aws_s3tables_table_bucket.example.arn } From e4799a08d0ba3b4c5c7d0fa94f3051a16aca4b3f Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 26 Nov 2024 17:39:32 -0800 Subject: [PATCH 27/35] Use `noflatten` for `aws_s3tables_table` `Namespace` --- internal/service/s3tables/table.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/internal/service/s3tables/table.go b/internal/service/s3tables/table.go index e6e9dd0d5c96..fd111f833acc 100644 --- a/internal/service/s3tables/table.go +++ b/internal/service/s3tables/table.go @@ -159,7 +159,6 @@ func (r *resourceTable) Create(ctx context.Context, req resource.CreateRequest, if resp.Diagnostics.HasError() { return } - input.Namespace = plan.Namespace.ValueStringPointer() _, err := conn.CreateTable(ctx, &input) if err != nil { @@ -526,7 +525,7 @@ type resourceTableModel struct { ModifiedAt timetypes.RFC3339 `tfsdk:"modified_at"` ModifiedBy types.String `tfsdk:"modified_by"` Name types.String `tfsdk:"name"` - Namespace types.String `tfsdk:"namespace" autoflex:"-"` + Namespace types.String `tfsdk:"namespace" autoflex:",noflatten"` // On read, Namespace is an array OwnerAccountID types.String `tfsdk:"owner_account_id"` TableBucketARN fwtypes.ARN `tfsdk:"table_bucket_arn"` Type fwtypes.StringEnum[awstypes.TableType] `tfsdk:"type"` From 6f801b511e965411c0c0fcb62e2aa8d30bddd488 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 26 Nov 2024 17:40:05 -0800 Subject: [PATCH 28/35] Don't allocate Input structs on heap --- internal/service/s3tables/namespace.go | 4 ++-- internal/service/s3tables/table.go | 4 ++-- internal/service/s3tables/table_bucket_policy.go | 4 ++-- internal/service/s3tables/table_policy.go | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/internal/service/s3tables/namespace.go b/internal/service/s3tables/namespace.go index 0dd6cccfe5ed..f73944ce9812 100644 --- a/internal/service/s3tables/namespace.go +++ 
b/internal/service/s3tables/namespace.go @@ -214,12 +214,12 @@ func (r *resourceNamespace) ImportState(ctx context.Context, req resource.Import } func findNamespace(ctx context.Context, conn *s3tables.Client, bucketARN, name string) (*s3tables.GetNamespaceOutput, error) { - in := &s3tables.GetNamespaceInput{ + in := s3tables.GetNamespaceInput{ Namespace: aws.String(name), TableBucketARN: aws.String(bucketARN), } - out, err := conn.GetNamespace(ctx, in) + out, err := conn.GetNamespace(ctx, &in) if err != nil { if errs.IsA[*awstypes.NotFoundException](err) { return nil, &retry.NotFoundError{ diff --git a/internal/service/s3tables/table.go b/internal/service/s3tables/table.go index fd111f833acc..37bf68453775 100644 --- a/internal/service/s3tables/table.go +++ b/internal/service/s3tables/table.go @@ -490,13 +490,13 @@ func (r *resourceTable) ImportState(ctx context.Context, req resource.ImportStat } func findTable(ctx context.Context, conn *s3tables.Client, bucketARN, namespace, name string) (*s3tables.GetTableOutput, error) { - in := &s3tables.GetTableInput{ + in := s3tables.GetTableInput{ Name: aws.String(name), Namespace: aws.String(namespace), TableBucketARN: aws.String(bucketARN), } - out, err := conn.GetTable(ctx, in) + out, err := conn.GetTable(ctx, &in) if err != nil { if errs.IsA[*awstypes.NotFoundException](err) { return nil, &retry.NotFoundError{ diff --git a/internal/service/s3tables/table_bucket_policy.go b/internal/service/s3tables/table_bucket_policy.go index 3292626d8849..861ad62e7524 100644 --- a/internal/service/s3tables/table_bucket_policy.go +++ b/internal/service/s3tables/table_bucket_policy.go @@ -203,11 +203,11 @@ func (r *resourceTableBucketPolicy) ImportState(ctx context.Context, req resourc } func findTableBucketPolicy(ctx context.Context, conn *s3tables.Client, tableBucketARN string) (*s3tables.GetTableBucketPolicyOutput, error) { - in := &s3tables.GetTableBucketPolicyInput{ + in := s3tables.GetTableBucketPolicyInput{ TableBucketARN: aws.String(tableBucketARN), } - out, err := conn.GetTableBucketPolicy(ctx, in) + out, err := conn.GetTableBucketPolicy(ctx, &in) if err != nil { if errs.IsA[*awstypes.NotFoundException](err) { return nil, &retry.NotFoundError{ diff --git a/internal/service/s3tables/table_policy.go b/internal/service/s3tables/table_policy.go index b4b90dfd1eec..01f007f7ef50 100644 --- a/internal/service/s3tables/table_policy.go +++ b/internal/service/s3tables/table_policy.go @@ -230,13 +230,13 @@ func (r *resourceTablePolicy) ImportState(ctx context.Context, req resource.Impo } func findTablePolicy(ctx context.Context, conn *s3tables.Client, bucketARN, namespace, name string) (*s3tables.GetTablePolicyOutput, error) { - in := &s3tables.GetTablePolicyInput{ + in := s3tables.GetTablePolicyInput{ Name: aws.String(name), Namespace: aws.String(namespace), TableBucketARN: aws.String(bucketARN), } - out, err := conn.GetTablePolicy(ctx, in) + out, err := conn.GetTablePolicy(ctx, &in) if err != nil { if errs.IsA[*awstypes.NotFoundException](err) { return nil, &retry.NotFoundError{ From a49f97746e3e51895c3d93dbbfc95506274fbaf9 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 3 Dec 2024 13:28:14 -0800 Subject: [PATCH 29/35] Adds CHANGELOG entry --- .changelog/40420.txt | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 .changelog/40420.txt diff --git a/.changelog/40420.txt b/.changelog/40420.txt new file mode 100644 index 000000000000..aa4caaeb3ade --- /dev/null +++ b/.changelog/40420.txt @@ -0,0 +1,19 @@ 
+```release-note:new-resource +aws_s3tables_namespace +``` + +```release-note:new-resource +aws_s3tables_table_bucket +``` + +```release-note:new-resource +aws_s3tables_table_bucket_policy +``` + +```release-note:new-resource +aws_s3tables_table +``` + +```release-note:new-resource +aws_s3tables_table_policy +``` From 41d327eaa9950819a28356d2d3601afb4f42aff0 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 3 Dec 2024 13:38:03 -0800 Subject: [PATCH 30/35] Corrects branding in documentation --- website/docs/r/s3tables_namespace.html.markdown | 4 ++-- website/docs/r/s3tables_table.html.markdown | 4 ++-- website/docs/r/s3tables_table_bucket.html.markdown | 4 ++-- website/docs/r/s3tables_table_bucket_policy.html.markdown | 4 ++-- website/docs/r/s3tables_table_policy.html.markdown | 4 ++-- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/website/docs/r/s3tables_namespace.html.markdown b/website/docs/r/s3tables_namespace.html.markdown index 9f8ba73f4512..e21e87c72634 100644 --- a/website/docs/r/s3tables_namespace.html.markdown +++ b/website/docs/r/s3tables_namespace.html.markdown @@ -3,12 +3,12 @@ subcategory: "S3 Tables" layout: "aws" page_title: "AWS: aws_s3tables_namespace" description: |- - Terraform resource for managing an AWS S3 Tables Namespace. + Terraform resource for managing an Amazon S3 Tables Namespace. --- # Resource: aws_s3tables_namespace -Terraform resource for managing an AWS S3 Tables Namespace. +Terraform resource for managing an Amazon S3 Tables Namespace. ## Example Usage diff --git a/website/docs/r/s3tables_table.html.markdown b/website/docs/r/s3tables_table.html.markdown index e443c25f22e1..0d4836811ab3 100644 --- a/website/docs/r/s3tables_table.html.markdown +++ b/website/docs/r/s3tables_table.html.markdown @@ -3,12 +3,12 @@ subcategory: "S3 Tables" layout: "aws" page_title: "AWS: aws_s3tables_table" description: |- - Terraform resource for managing an AWS S3 Tables Table. + Terraform resource for managing an Amazon S3 Tables Table. --- # Resource: aws_s3tables_table -Terraform resource for managing an AWS S3 Tables Table. +Terraform resource for managing an Amazon S3 Tables Table. ## Example Usage diff --git a/website/docs/r/s3tables_table_bucket.html.markdown b/website/docs/r/s3tables_table_bucket.html.markdown index 7084d6b20770..6b80e0a03178 100644 --- a/website/docs/r/s3tables_table_bucket.html.markdown +++ b/website/docs/r/s3tables_table_bucket.html.markdown @@ -3,12 +3,12 @@ subcategory: "S3 Tables" layout: "aws" page_title: "AWS: aws_s3tables_table_bucket" description: |- - Terraform resource for managing an AWS S3 Tables Table Bucket. + Terraform resource for managing an Amazon S3 Tables Table Bucket. --- # Resource: aws_s3tables_table_bucket -Terraform resource for managing an AWS S3 Tables Table Bucket. +Terraform resource for managing an Amazon S3 Tables Table Bucket. ## Example Usage diff --git a/website/docs/r/s3tables_table_bucket_policy.html.markdown b/website/docs/r/s3tables_table_bucket_policy.html.markdown index a0652701ade6..12e49ca0c505 100644 --- a/website/docs/r/s3tables_table_bucket_policy.html.markdown +++ b/website/docs/r/s3tables_table_bucket_policy.html.markdown @@ -3,12 +3,12 @@ subcategory: "S3 Tables" layout: "aws" page_title: "AWS: aws_s3tables_table_bucket_policy" description: |- - Terraform resource for managing an AWS S3 Tables Table Bucket Policy. + Terraform resource for managing an Amazon S3 Tables Table Bucket Policy. 
--- # Resource: aws_s3tables_table_bucket_policy -Terraform resource for managing an AWS S3 Tables Table Bucket Policy. +Terraform resource for managing an Amazon S3 Tables Table Bucket Policy. ## Example Usage diff --git a/website/docs/r/s3tables_table_policy.html.markdown b/website/docs/r/s3tables_table_policy.html.markdown index 3dcc162b55f8..935a480d0c95 100644 --- a/website/docs/r/s3tables_table_policy.html.markdown +++ b/website/docs/r/s3tables_table_policy.html.markdown @@ -3,12 +3,12 @@ subcategory: "S3 Tables" layout: "aws" page_title: "AWS: aws_s3tables_table_policy" description: |- - Terraform resource for managing an AWS S3 Tables Table Policy. + Terraform resource for managing an Amazon S3 Tables Table Policy. --- # Resource: aws_s3tables_table_policy -Terraform resource for managing an AWS S3 Tables Table Policy. +Terraform resource for managing an Amazon S3 Tables Table Policy. ## Example Usage From 4badbf54aaa9d55ae9de81c52a57edbe0cf21c9b Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 3 Dec 2024 14:52:15 -0800 Subject: [PATCH 31/35] Fixes `copyloopvar` linting errors --- internal/framework/validators/prefix_none_of_test.go | 3 --- internal/framework/validators/suffix_none_of_test.go | 3 --- 2 files changed, 6 deletions(-) diff --git a/internal/framework/validators/prefix_none_of_test.go b/internal/framework/validators/prefix_none_of_test.go index a43b8804ebba..69619e138e15 100644 --- a/internal/framework/validators/prefix_none_of_test.go +++ b/internal/framework/validators/prefix_none_of_test.go @@ -68,8 +68,6 @@ func TestPrefixNoneOfValidator(t *testing.T) { } for name, test := range testCases { - name, test := name, test - t.Run(fmt.Sprintf("ValidateString - %s", name), func(t *testing.T) { t.Parallel() req := validator.StringRequest{ @@ -105,7 +103,6 @@ func TestPrefixNoneOfValidator_Description(t *testing.T) { } for name, test := range testCases { - name, test := name, test t.Run(name, func(t *testing.T) { t.Parallel() diff --git a/internal/framework/validators/suffix_none_of_test.go b/internal/framework/validators/suffix_none_of_test.go index fe62d184b891..ace6792fb89b 100644 --- a/internal/framework/validators/suffix_none_of_test.go +++ b/internal/framework/validators/suffix_none_of_test.go @@ -63,8 +63,6 @@ func TestSuffixNoneOfValidator(t *testing.T) { } for name, test := range testCases { - name, test := name, test - t.Run(fmt.Sprintf("ValidateString - %s", name), func(t *testing.T) { t.Parallel() req := validator.StringRequest{ @@ -100,7 +98,6 @@ func TestSuffixNoneOfValidator_Description(t *testing.T) { } for name, test := range testCases { - name, test := name, test t.Run(name, func(t *testing.T) { t.Parallel() From 9cc60bf2a817d6aa3bb9eb314f5dbf0ead1bcd4c Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 3 Dec 2024 15:38:01 -0800 Subject: [PATCH 32/35] `tfproviderdocs` doesn't treat newline and space as equivalent --- website/docs/r/s3tables_namespace.html.markdown | 6 ++---- website/docs/r/s3tables_table.html.markdown | 6 ++---- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/website/docs/r/s3tables_namespace.html.markdown b/website/docs/r/s3tables_namespace.html.markdown index e21e87c72634..73deb49dd71a 100644 --- a/website/docs/r/s3tables_namespace.html.markdown +++ b/website/docs/r/s3tables_namespace.html.markdown @@ -44,8 +44,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` 
block](https://developer.hashicorp.com/terraform/language/import) to import S3 Tables Namespace using the `table_bucket_arn` and the value of `namespace`, separated by a semicolon (`;`). -For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 Tables Namespace using the `table_bucket_arn` and the value of `namespace`, separated by a semicolon (`;`). For example: ```terraform import { @@ -54,8 +53,7 @@ import { } ``` -Using `terraform import`, import S3 Tables Namespace using the `table_bucket_arn` and the value of `namespace`, separated by a semicolon (`;`). -For example: +Using `terraform import`, import S3 Tables Namespace using the `table_bucket_arn` and the value of `namespace`, separated by a semicolon (`;`). For example: ```console % terraform import aws_s3tables_namespace.example 'arn:aws:s3tables:us-west-2:123456789012:bucket/example-bucket;example-namespace' diff --git a/website/docs/r/s3tables_table.html.markdown b/website/docs/r/s3tables_table.html.markdown index 0d4836811ab3..cc053b433db9 100644 --- a/website/docs/r/s3tables_table.html.markdown +++ b/website/docs/r/s3tables_table.html.markdown @@ -112,8 +112,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 Tables Table using the `table_bucket_arn`, the value of `namespace`, and the value of `name`, separated by a semicolon (`;`). -For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 Tables Table using the `table_bucket_arn`, the value of `namespace`, and the value of `name`, separated by a semicolon (`;`). For example: ```terraform import { @@ -122,8 +121,7 @@ import { } ``` -Using `terraform import`, import S3 Tables Table using the `table_bucket_arn`, the value of `namespace`, and the value of `name`, separated by a semicolon (`;`). -For example: +Using `terraform import`, import S3 Tables Table using the `table_bucket_arn`, the value of `namespace`, and the value of `name`, separated by a semicolon (`;`). 
For example: ```console % terraform import aws_s3tables_table.example 'arn:aws:s3tables:us-west-2:123456789012:bucket/example-bucket;example-namespace;example-table' From b88c7ea3d9b6bac1fd0e1df18fc6705e1de5d3b4 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 3 Dec 2024 15:39:52 -0800 Subject: [PATCH 33/35] Removes deprecated variable interpolation --- internal/service/s3tables/table_policy_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/s3tables/table_policy_test.go b/internal/service/s3tables/table_policy_test.go index 90a62441d4ed..8af7c3d86da2 100644 --- a/internal/service/s3tables/table_policy_test.go +++ b/internal/service/s3tables/table_policy_test.go @@ -181,7 +181,7 @@ data "aws_iam_policy_document" "test" { type = "AWS" identifiers = [data.aws_caller_identity.current.account_id] } - resources = ["${aws_s3tables_table.test.arn}"] + resources = [aws_s3tables_table.test.arn] } } From 203a8115163e5eda533e0962ecb5a7d8ceb67b9b Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 3 Dec 2024 15:41:46 -0800 Subject: [PATCH 34/35] More `tfproviderdocs` doesn't treat newline and space as equivalent --- website/docs/r/s3tables_table_bucket.html.markdown | 6 ++---- website/docs/r/s3tables_table_bucket_policy.html.markdown | 6 ++---- website/docs/r/s3tables_table_policy.html.markdown | 6 ++---- 3 files changed, 6 insertions(+), 12 deletions(-) diff --git a/website/docs/r/s3tables_table_bucket.html.markdown b/website/docs/r/s3tables_table_bucket.html.markdown index 6b80e0a03178..bd9b6535f903 100644 --- a/website/docs/r/s3tables_table_bucket.html.markdown +++ b/website/docs/r/s3tables_table_bucket.html.markdown @@ -69,8 +69,7 @@ This resource exports the following attributes in addition to the argument above ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 Tables Table Bucket using the `arn`. -For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 Tables Table Bucket using the `arn`. For example: ```terraform import { @@ -79,8 +78,7 @@ import { } ``` -Using `terraform import`, import S3 Tables Table Bucket using the `arn`. -For example: +Using `terraform import`, import S3 Tables Table Bucket using the `arn`. For example: ```console % terraform import aws_s3tables_table_bucket.example arn:aws:s3tables:us-west-2:123456789012:bucket/example-bucket diff --git a/website/docs/r/s3tables_table_bucket_policy.html.markdown b/website/docs/r/s3tables_table_bucket_policy.html.markdown index 12e49ca0c505..b113ca008267 100644 --- a/website/docs/r/s3tables_table_bucket_policy.html.markdown +++ b/website/docs/r/s3tables_table_bucket_policy.html.markdown @@ -40,8 +40,7 @@ The following arguments are required: ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 Tables Table Bucket Policy using the `table_bucket_arn`. -For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 Tables Table Bucket Policy using the `table_bucket_arn`. For example: ```terraform import { @@ -50,8 +49,7 @@ import { } ``` -Using `terraform import`, import S3 Tables Table Bucket Policy using the `table_bucket_arn`. -For example: +Using `terraform import`, import S3 Tables Table Bucket Policy using the `table_bucket_arn`. 
For example: ```console % terraform import aws_s3tables_table_bucket_policy.example 'arn:aws:s3tables:us-west-2:123456789012:bucket/example-bucket;example-namespace' diff --git a/website/docs/r/s3tables_table_policy.html.markdown b/website/docs/r/s3tables_table_policy.html.markdown index 935a480d0c95..b38eea9965ce 100644 --- a/website/docs/r/s3tables_table_policy.html.markdown +++ b/website/docs/r/s3tables_table_policy.html.markdown @@ -60,8 +60,7 @@ The following arguments are required: ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 Tables Table Policy using the `table_bucket_arn`, the value of `namespace`, and the value of `name`, separated by a semicolon (`;`). -For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 Tables Table Policy using the `table_bucket_arn`, the value of `namespace`, and the value of `name`, separated by a semicolon (`;`). For example: ```terraform import { @@ -70,8 +69,7 @@ import { } ``` -Using `terraform import`, import S3 Tables Table Policy using the `table_bucket_arn`, the value of `namespace`, and the value of `name`, separated by a semicolon (`;`). -For example: +Using `terraform import`, import S3 Tables Table Policy using the `table_bucket_arn`, the value of `namespace`, and the value of `name`, separated by a semicolon (`;`). For example: ```console % terraform import aws_s3tables_table_policy.example 'arn:aws:s3tables:us-west-2:123456789012:bucket/example-bucket;example-namespace;example-table' From 533bd2c4ae6a44b7ed175c6b2bea9d9b1f1b2de9 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 3 Dec 2024 15:42:06 -0800 Subject: [PATCH 35/35] Required phrasing --- website/docs/r/s3tables_table_bucket.html.markdown | 2 +- website/docs/r/s3tables_table_bucket_policy.html.markdown | 4 ++++ website/docs/r/s3tables_table_policy.html.markdown | 4 ++++ 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/website/docs/r/s3tables_table_bucket.html.markdown b/website/docs/r/s3tables_table_bucket.html.markdown index bd9b6535f903..a465d7fcf1a2 100644 --- a/website/docs/r/s3tables_table_bucket.html.markdown +++ b/website/docs/r/s3tables_table_bucket.html.markdown @@ -61,7 +61,7 @@ The `iceberg_unreferenced_file_removal.settings` configuration block supports th ## Attribute Reference -This resource exports the following attributes in addition to the argument above: +This resource exports the following attributes in addition to the arguments above: * `arn` - ARN of the table bucket. * `created_at` - Date and time when the bucket was created. diff --git a/website/docs/r/s3tables_table_bucket_policy.html.markdown b/website/docs/r/s3tables_table_bucket_policy.html.markdown index b113ca008267..003f49ff4d95 100644 --- a/website/docs/r/s3tables_table_bucket_policy.html.markdown +++ b/website/docs/r/s3tables_table_bucket_policy.html.markdown @@ -38,6 +38,10 @@ The following arguments are required: * `resource_policy` - (Required) Amazon Web Services resource-based policy document in JSON format. * `table_bucket_arn` - (Required, Forces new resource) ARN referencing the Table Bucket that owns this policy. +## Attribute Reference + +This resource exports no additional attributes. + ## Import In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 Tables Table Bucket Policy using the `table_bucket_arn`. 
For example: diff --git a/website/docs/r/s3tables_table_policy.html.markdown b/website/docs/r/s3tables_table_policy.html.markdown index b38eea9965ce..32aeeaea50be 100644 --- a/website/docs/r/s3tables_table_policy.html.markdown +++ b/website/docs/r/s3tables_table_policy.html.markdown @@ -58,6 +58,10 @@ The following arguments are required: Can consist of lowercase letters, numbers, and underscores, and must begin and end with a lowercase letter or number. * `table_bucket_arn` - (Required, Forces new resource) ARN referencing the Table Bucket that contains this Namespace. +## Attribute Reference + +This resource exports no additional attributes. + ## Import In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 Tables Table Policy using the `table_bucket_arn`, the value of `namespace`, and the value of `name`, separated by a semicolon (`;`). For example: