diff --git a/.changelog/33245.txt b/.changelog/33245.txt new file mode 100644 index 000000000000..09c04a339339 --- /dev/null +++ b/.changelog/33245.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_fsx_openzfs_file_system: Add `endpoint_ip_address_range`, `preferred_subnet_id` and `route_table_ids` arguments to support the [Multi-AZ deployment type](https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/availability-durability.html#choosing-single-or-multi) +``` + +```release-note:bug +resource/aws_fsx_openzfs_file_system: Wait for administrative action completion when updating root volume +``` \ No newline at end of file diff --git a/internal/service/fsx/data_repository_association.go b/internal/service/fsx/data_repository_association.go index 487bbcdafa1c..9944bd7219b0 100644 --- a/internal/service/fsx/data_repository_association.go +++ b/internal/service/fsx/data_repository_association.go @@ -5,6 +5,7 @@ package fsx import ( "context" + "errors" "log" "time" @@ -15,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -34,6 +36,7 @@ func ResourceDataRepositoryAssociation() *schema.Resource { ReadWithoutTimeout: resourceDataRepositoryAssociationRead, UpdateWithoutTimeout: resourceDataRepositoryAssociationUpdate, DeleteWithoutTimeout: resourceDataRepositoryAssociationDelete, + Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, @@ -67,6 +70,11 @@ func ResourceDataRepositoryAssociation() *schema.Resource { validation.StringMatch(regexache.MustCompile(`^s3://`), "must begin with s3://"), ), }, + "delete_data_in_filesystem": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, "file_system_id": { Type: schema.TypeString, ForceNew: true, @@ -142,11 +150,6 @@ func ResourceDataRepositoryAssociation() *schema.Resource { }, DiffSuppressFunc: verify.SuppressMissingOptionalConfigurationBlock, }, - "delete_data_in_filesystem": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, names.AttrTags: tftags.TagsSchema(), names.AttrTagsAll: tftags.TagsSchemaComputed(), }, @@ -184,44 +187,13 @@ func resourceDataRepositoryAssociationCreate(ctx context.Context, d *schema.Reso result, err := conn.CreateDataRepositoryAssociationWithContext(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "creating FSx Lustre Data Repository Association: %s", err) + return sdkdiag.AppendErrorf(diags, "creating FSx for Lustre Data Repository Association: %s", err) } d.SetId(aws.StringValue(result.Association.AssociationId)) if _, err := waitDataRepositoryAssociationCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx Lustre Data Repository Association (%s) create: %s", d.Id(), err) - } - - return append(diags, resourceDataRepositoryAssociationRead(ctx, d, meta)...) 
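For the Multi-AZ enhancement noted in the changelog entry above, the three new arguments would surface as schema fields on the OpenZFS resource. A minimal sketch of such declarations (illustrative only; the Optional/Computed/ForceNew flags and the validator are assumptions, not code from this diff):

```go
// Hypothetical schema fragment for aws_fsx_openzfs_file_system covering the
// arguments named in the changelog; flag choices below are assumptions.
"endpoint_ip_address_range": {
	Type:         schema.TypeString,
	Optional:     true,
	Computed:     true,
	ForceNew:     true,
	ValidateFunc: validation.IsCIDR, // the endpoint range is expressed as a CIDR block
},
"preferred_subnet_id": {
	Type:     schema.TypeString,
	Optional: true,
	ForceNew: true,
},
"route_table_ids": {
	Type:     schema.TypeSet,
	Optional: true,
	Computed: true,
	Elem:     &schema.Schema{Type: schema.TypeString},
},
```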
-} - -func resourceDataRepositoryAssociationUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) - - if d.HasChangesExcept("tags_all", "tags") { - input := &fsx.UpdateDataRepositoryAssociationInput{ - ClientRequestToken: aws.String(id.UniqueId()), - AssociationId: aws.String(d.Id()), - } - - if d.HasChange("imported_file_chunk_size") { - input.ImportedFileChunkSize = aws.Int64(int64(d.Get("imported_file_chunk_size").(int))) - } - - if d.HasChange("s3") { - input.S3 = expandDataRepositoryAssociationS3(d.Get("s3").([]interface{})) - } - - _, err := conn.UpdateDataRepositoryAssociationWithContext(ctx, input) - if err != nil { - return sdkdiag.AppendErrorf(diags, "updating FSX Lustre Data Repository Association (%s): %s", d.Id(), err) - } - - if _, err := waitDataRepositoryAssociationUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx Lustre Data Repository Association (%s) update: %s", d.Id(), err) - } + return sdkdiag.AppendErrorf(diags, "waiting for FSx for Lustre Data Repository Association (%s) create: %s", d.Id(), err) } return append(diags, resourceDataRepositoryAssociationRead(ctx, d, meta)...) @@ -232,14 +204,15 @@ func resourceDataRepositoryAssociationRead(ctx context.Context, d *schema.Resour conn := meta.(*conns.AWSClient).FSxConn(ctx) association, err := FindDataRepositoryAssociationByID(ctx, conn, d.Id()) + if !d.IsNewResource() && tfresource.NotFound(err) { - log.Printf("[WARN] FSx Lustre Data Repository Association (%s) not found, removing from state", d.Id()) + log.Printf("[WARN] FSx for Lustre Data Repository Association (%s) not found, removing from state", d.Id()) d.SetId("") return diags } if err != nil { - return sdkdiag.AppendErrorf(diags, "reading FSx Lustre Data Repository Association (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "reading FSx for Lustre Data Repository Association (%s): %s", d.Id(), err) } d.Set("arn", association.ResourceARN) @@ -249,7 +222,7 @@ func resourceDataRepositoryAssociationRead(ctx context.Context, d *schema.Resour d.Set("file_system_path", association.FileSystemPath) d.Set("imported_file_chunk_size", association.ImportedFileChunkSize) if err := d.Set("s3", flattenDataRepositoryAssociationS3(association.S3)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting s3 data repository configuration: %s", err) + return sdkdiag.AppendErrorf(diags, "setting s3: %s", err) } setTagsOut(ctx, association.Tags) @@ -257,17 +230,49 @@ func resourceDataRepositoryAssociationRead(ctx context.Context, d *schema.Resour return diags } +func resourceDataRepositoryAssociationUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).FSxConn(ctx) + + if d.HasChangesExcept("tags", "tags_all") { + input := &fsx.UpdateDataRepositoryAssociationInput{ + AssociationId: aws.String(d.Id()), + ClientRequestToken: aws.String(id.UniqueId()), + } + + if d.HasChange("imported_file_chunk_size") { + input.ImportedFileChunkSize = aws.Int64(int64(d.Get("imported_file_chunk_size").(int))) + } + + if d.HasChange("s3") { + input.S3 = expandDataRepositoryAssociationS3(d.Get("s3").([]interface{})) + } + + _, err := conn.UpdateDataRepositoryAssociationWithContext(ctx, input) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "updating FSx for Lustre Data Repository Association (%s): 
%s", d.Id(), err) + } + + if _, err := waitDataRepositoryAssociationUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for FSx for Lustre Data Repository Association (%s) update: %s", d.Id(), err) + } + } + + return append(diags, resourceDataRepositoryAssociationRead(ctx, d, meta)...) +} + func resourceDataRepositoryAssociationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) request := &fsx.DeleteDataRepositoryAssociationInput{ - ClientRequestToken: aws.String(id.UniqueId()), AssociationId: aws.String(d.Id()), + ClientRequestToken: aws.String(id.UniqueId()), DeleteDataInFileSystem: aws.Bool(d.Get("delete_data_in_filesystem").(bool)), } - log.Printf("[DEBUG] Deleting FSx Lustre Data Repository Association: %s", d.Id()) + log.Printf("[DEBUG] Deleting FSx for Lustre Data Repository Association: %s", d.Id()) _, err := conn.DeleteDataRepositoryAssociationWithContext(ctx, request) if tfawserr.ErrCodeEquals(err, fsx.ErrCodeDataRepositoryAssociationNotFound) { @@ -275,11 +280,11 @@ func resourceDataRepositoryAssociationDelete(ctx context.Context, d *schema.Reso } if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting FSx Lustre Data Repository Association (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "deleting FSx for Lustre Data Repository Association (%s): %s", d.Id(), err) } if _, err := waitDataRepositoryAssociationDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx Lustre Data Repository Association (%s) to deleted: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for FSx for Lustre Data Repository Association (%s) delete: %s", d.Id(), err) } return diags @@ -375,3 +380,134 @@ func flattenS3AutoImportPolicy(policy *fsx.AutoImportPolicy) []map[string][]inte return []map[string][]interface{}{result} } + +func FindDataRepositoryAssociationByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.DataRepositoryAssociation, error) { + input := &fsx.DescribeDataRepositoryAssociationsInput{ + AssociationIds: aws.StringSlice([]string{id}), + } + + return findDataRepositoryAssociation(ctx, conn, input) +} + +func findDataRepositoryAssociation(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeDataRepositoryAssociationsInput) (*fsx.DataRepositoryAssociation, error) { + output, err := findDataRepositoryAssociations(ctx, conn, input) + + if err != nil { + return nil, err + } + + return tfresource.AssertSinglePtrResult(output) +} + +func findDataRepositoryAssociations(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeDataRepositoryAssociationsInput) ([]*fsx.DataRepositoryAssociation, error) { + var output []*fsx.DataRepositoryAssociation + + err := conn.DescribeDataRepositoryAssociationsPagesWithContext(ctx, input, func(page *fsx.DescribeDataRepositoryAssociationsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, v := range page.Associations { + if v != nil { + output = append(output, v) + } + } + + return !lastPage + }) + + if tfawserr.ErrCodeEquals(err, fsx.ErrCodeDataRepositoryAssociationNotFound) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + return output, nil +} + +func statusDataRepositoryAssociation(ctx context.Context, conn *fsx.FSx, id string) retry.StateRefreshFunc { + return 
func() (interface{}, string, error) { + output, err := FindDataRepositoryAssociationByID(ctx, conn, id) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, aws.StringValue(output.Lifecycle), nil + } +} + +func waitDataRepositoryAssociationCreated(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.DataRepositoryAssociation, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{fsx.DataRepositoryLifecycleCreating}, + Target: []string{fsx.DataRepositoryLifecycleAvailable}, + Refresh: statusDataRepositoryAssociation(ctx, conn, id), + Timeout: timeout, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*fsx.DataRepositoryAssociation); ok { + if status, details := aws.StringValue(output.Lifecycle), output.FailureDetails; status == fsx.DataRepositoryLifecycleFailed && details != nil { + tfresource.SetLastError(err, errors.New(aws.StringValue(output.FailureDetails.Message))) + } + + return output, err + } + + return nil, err +} + +func waitDataRepositoryAssociationUpdated(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.DataRepositoryAssociation, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{fsx.DataRepositoryLifecycleUpdating}, + Target: []string{fsx.DataRepositoryLifecycleAvailable}, + Refresh: statusDataRepositoryAssociation(ctx, conn, id), + Timeout: timeout, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*fsx.DataRepositoryAssociation); ok { + if status, details := aws.StringValue(output.Lifecycle), output.FailureDetails; status == fsx.DataRepositoryLifecycleFailed && details != nil { + tfresource.SetLastError(err, errors.New(aws.StringValue(output.FailureDetails.Message))) + } + + return output, err + } + + return nil, err +} + +func waitDataRepositoryAssociationDeleted(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.DataRepositoryAssociation, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{fsx.DataRepositoryLifecycleAvailable, fsx.DataRepositoryLifecycleDeleting}, + Target: []string{}, + Refresh: statusDataRepositoryAssociation(ctx, conn, id), + Timeout: timeout, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*fsx.DataRepositoryAssociation); ok { + if status, details := aws.StringValue(output.Lifecycle), output.FailureDetails; status == fsx.DataRepositoryLifecycleFailed && details != nil { + tfresource.SetLastError(err, errors.New(aws.StringValue(output.FailureDetails.Message))) + } + + return output, err + } + + return nil, err +} diff --git a/internal/service/fsx/data_repository_association_test.go b/internal/service/fsx/data_repository_association_test.go index 85f40dda9399..c4249a3dce65 100644 --- a/internal/service/fsx/data_repository_association_test.go +++ b/internal/service/fsx/data_repository_association_test.go @@ -24,24 +24,25 @@ import ( func TestAccFSxDataRepositoryAssociation_basic(t *testing.T) { ctx := acctest.Context(t) - if acctest.Partition() == endpoints.AwsUsGovPartitionID { - t.Skip("PERSISTENT_2 deployment_type is not supported in GovCloud partition") - } - var association fsx.DataRepositoryAssociation resourceName := "aws_fsx_data_repository_association.test" - bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - bucketPath := 
fmt.Sprintf("s3://%s", bucketName) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + bucketPath := fmt.Sprintf("s3://%s", rName) fileSystemPath := "/test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) + // PERSISTENT_2 deployment_type is not supported in GovCloud partition. + acctest.PreCheckPartitionNot(t, endpoints.AwsUsGovPartitionID) + }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataRepositoryAssociationDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDataRepositoryAssociationConfig_fileSystemPath(bucketName, fileSystemPath), + Config: testAccDataRepositoryAssociationConfig_fileSystemPath(rName, rName, fileSystemPath), Check: resource.ComposeTestCheckFunc( testAccCheckDataRepositoryAssociationExists(ctx, resourceName, &association), acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "fsx", regexache.MustCompile(`association/fs-.+/dra-.+`)), @@ -64,23 +65,24 @@ func TestAccFSxDataRepositoryAssociation_basic(t *testing.T) { func TestAccFSxDataRepositoryAssociation_disappears(t *testing.T) { ctx := acctest.Context(t) - if acctest.Partition() == endpoints.AwsUsGovPartitionID { - t.Skip("PERSISTENT_2 deployment_type is not supported in GovCloud partition") - } - var association fsx.DataRepositoryAssociation resourceName := "aws_fsx_data_repository_association.test" - bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) fileSystemPath := "/test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) + // PERSISTENT_2 deployment_type is not supported in GovCloud partition. 
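The in-body `t.Skip` checks removed above are replaced by this shared precheck. As a sketch of what a partition-exclusion helper like `acctest.PreCheckPartitionNot` plausibly does (assumed implementation, not taken from this diff; it presumes the provider's `acctest` package, where `Partition()` reports the current AWS partition):

```go
// PreCheckPartitionNot skips the test when the current partition matches any
// of the given partitions. Sketch only; the provider's real helper may differ.
func PreCheckPartitionNot(t *testing.T, partitions ...string) {
	t.Helper()

	for _, partition := range partitions {
		if curr := Partition(); curr == partition {
			t.Skipf("skipping test; current partition (%s) is not supported", curr)
		}
	}
}
```

This keeps the GovCloud exclusion in the PreCheck phase alongside the other prechecks instead of scattering skip logic through test bodies.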
+ acctest.PreCheckPartitionNot(t, endpoints.AwsUsGovPartitionID) + }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataRepositoryAssociationDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDataRepositoryAssociationConfig_fileSystemPath(bucketName, fileSystemPath), + Config: testAccDataRepositoryAssociationConfig_fileSystemPath(rName, rName, fileSystemPath), Check: resource.ComposeTestCheckFunc( testAccCheckDataRepositoryAssociationExists(ctx, resourceName, &association), acctest.CheckResourceDisappears(ctx, acctest.Provider, tffsx.ResourceDataRepositoryAssociation(), resourceName), @@ -93,24 +95,25 @@ func TestAccFSxDataRepositoryAssociation_disappears(t *testing.T) { func TestAccFSxDataRepositoryAssociation_disappears_ParentFileSystem(t *testing.T) { ctx := acctest.Context(t) - if acctest.Partition() == endpoints.AwsUsGovPartitionID { - t.Skip("PERSISTENT_2 deployment_type is not supported in GovCloud partition") - } - var association fsx.DataRepositoryAssociation parentResourceName := "aws_fsx_lustre_file_system.test" resourceName := "aws_fsx_data_repository_association.test" - bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) fileSystemPath := "/test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) + // PERSISTENT_2 deployment_type is not supported in GovCloud partition. + acctest.PreCheckPartitionNot(t, endpoints.AwsUsGovPartitionID) + }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataRepositoryAssociationDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDataRepositoryAssociationConfig_fileSystemPath(bucketName, fileSystemPath), + Config: testAccDataRepositoryAssociationConfig_fileSystemPath(rName, rName, fileSystemPath), Check: resource.ComposeTestCheckFunc( testAccCheckDataRepositoryAssociationExists(ctx, resourceName, &association), acctest.CheckResourceDisappears(ctx, acctest.Provider, tffsx.ResourceLustreFileSystem(), parentResourceName), @@ -123,24 +126,25 @@ func TestAccFSxDataRepositoryAssociation_disappears_ParentFileSystem(t *testing. func TestAccFSxDataRepositoryAssociation_fileSystemPathUpdated(t *testing.T) { ctx := acctest.Context(t) - if acctest.Partition() == endpoints.AwsUsGovPartitionID { - t.Skip("PERSISTENT_2 deployment_type is not supported in GovCloud partition") - } - var association1, association2 fsx.DataRepositoryAssociation resourceName := "aws_fsx_data_repository_association.test" - bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) fileSystemPath1 := "/test1" fileSystemPath2 := "/test2" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) + // PERSISTENT_2 deployment_type is not supported in GovCloud partition. 
+ acctest.PreCheckPartitionNot(t, endpoints.AwsUsGovPartitionID) + }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataRepositoryAssociationDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDataRepositoryAssociationConfig_fileSystemPath(bucketName, fileSystemPath1), + Config: testAccDataRepositoryAssociationConfig_fileSystemPath(rName, rName, fileSystemPath1), Check: resource.ComposeTestCheckFunc( testAccCheckDataRepositoryAssociationExists(ctx, resourceName, &association1), resource.TestCheckResourceAttr(resourceName, "file_system_path", fileSystemPath1), @@ -153,7 +157,7 @@ func TestAccFSxDataRepositoryAssociation_fileSystemPathUpdated(t *testing.T) { ImportStateVerifyIgnore: []string{"delete_data_in_filesystem"}, }, { - Config: testAccDataRepositoryAssociationConfig_fileSystemPath(bucketName, fileSystemPath2), + Config: testAccDataRepositoryAssociationConfig_fileSystemPath(rName, rName, fileSystemPath2), Check: resource.ComposeTestCheckFunc( testAccCheckDataRepositoryAssociationExists(ctx, resourceName, &association2), testAccCheckDataRepositoryAssociationRecreated(&association1, &association2), @@ -166,12 +170,9 @@ func TestAccFSxDataRepositoryAssociation_fileSystemPathUpdated(t *testing.T) { func TestAccFSxDataRepositoryAssociation_dataRepositoryPathUpdated(t *testing.T) { ctx := acctest.Context(t) - if acctest.Partition() == endpoints.AwsUsGovPartitionID { - t.Skip("PERSISTENT_2 deployment_type is not supported in GovCloud partition") - } - var association1, association2 fsx.DataRepositoryAssociation resourceName := "aws_fsx_data_repository_association.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) bucketName1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) bucketPath1 := fmt.Sprintf("s3://%s", bucketName1) bucketName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -179,13 +180,18 @@ func TestAccFSxDataRepositoryAssociation_dataRepositoryPathUpdated(t *testing.T) fileSystemPath := "/test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) + // PERSISTENT_2 deployment_type is not supported in GovCloud partition. 
+ acctest.PreCheckPartitionNot(t, endpoints.AwsUsGovPartitionID) + }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataRepositoryAssociationDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDataRepositoryAssociationConfig_fileSystemPath(bucketName1, fileSystemPath), + Config: testAccDataRepositoryAssociationConfig_fileSystemPath(rName, bucketName1, fileSystemPath), Check: resource.ComposeTestCheckFunc( testAccCheckDataRepositoryAssociationExists(ctx, resourceName, &association1), resource.TestCheckResourceAttr(resourceName, "data_repository_path", bucketPath1), @@ -198,7 +204,7 @@ func TestAccFSxDataRepositoryAssociation_dataRepositoryPathUpdated(t *testing.T) ImportStateVerifyIgnore: []string{"delete_data_in_filesystem"}, }, { - Config: testAccDataRepositoryAssociationConfig_fileSystemPath(bucketName2, fileSystemPath), + Config: testAccDataRepositoryAssociationConfig_fileSystemPath(rName, bucketName2, fileSystemPath), Check: resource.ComposeTestCheckFunc( testAccCheckDataRepositoryAssociationExists(ctx, resourceName, &association2), testAccCheckDataRepositoryAssociationRecreated(&association1, &association2), @@ -212,23 +218,24 @@ func TestAccFSxDataRepositoryAssociation_dataRepositoryPathUpdated(t *testing.T) // lintignore:AT002 func TestAccFSxDataRepositoryAssociation_importedFileChunkSize(t *testing.T) { ctx := acctest.Context(t) - if acctest.Partition() == endpoints.AwsUsGovPartitionID { - t.Skip("PERSISTENT_2 deployment_type is not supported in GovCloud partition") - } - var association fsx.DataRepositoryAssociation resourceName := "aws_fsx_data_repository_association.test" - bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) fileSystemPath := "/test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) + // PERSISTENT_2 deployment_type is not supported in GovCloud partition. 
+ acctest.PreCheckPartitionNot(t, endpoints.AwsUsGovPartitionID) + }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataRepositoryAssociationDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDataRepositoryAssociationConfig_importedFileChunkSize(bucketName, fileSystemPath, 256), + Config: testAccDataRepositoryAssociationConfig_importedFileChunkSize(rName, rName, fileSystemPath, 256), Check: resource.ComposeTestCheckFunc( testAccCheckDataRepositoryAssociationExists(ctx, resourceName, &association), resource.TestCheckResourceAttr(resourceName, "imported_file_chunk_size", "256"), @@ -247,23 +254,24 @@ func TestAccFSxDataRepositoryAssociation_importedFileChunkSize(t *testing.T) { // lintignore:AT002 func TestAccFSxDataRepositoryAssociation_importedFileChunkSizeUpdated(t *testing.T) { ctx := acctest.Context(t) - if acctest.Partition() == endpoints.AwsUsGovPartitionID { - t.Skip("PERSISTENT_2 deployment_type is not supported in GovCloud partition") - } - var association1, association2 fsx.DataRepositoryAssociation resourceName := "aws_fsx_data_repository_association.test" - bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) fileSystemPath := "/test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) + // PERSISTENT_2 deployment_type is not supported in GovCloud partition. + acctest.PreCheckPartitionNot(t, endpoints.AwsUsGovPartitionID) + }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataRepositoryAssociationDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDataRepositoryAssociationConfig_importedFileChunkSize(bucketName, fileSystemPath, 256), + Config: testAccDataRepositoryAssociationConfig_importedFileChunkSize(rName, rName, fileSystemPath, 256), Check: resource.ComposeTestCheckFunc( testAccCheckDataRepositoryAssociationExists(ctx, resourceName, &association1), resource.TestCheckResourceAttr(resourceName, "imported_file_chunk_size", "256"), @@ -276,7 +284,7 @@ func TestAccFSxDataRepositoryAssociation_importedFileChunkSizeUpdated(t *testing ImportStateVerifyIgnore: []string{"delete_data_in_filesystem"}, }, { - Config: testAccDataRepositoryAssociationConfig_importedFileChunkSize(bucketName, fileSystemPath, 512), + Config: testAccDataRepositoryAssociationConfig_importedFileChunkSize(rName, rName, fileSystemPath, 512), Check: resource.ComposeTestCheckFunc( testAccCheckDataRepositoryAssociationExists(ctx, resourceName, &association2), testAccCheckDataRepositoryAssociationNotRecreated(&association1, &association2), @@ -289,23 +297,24 @@ func TestAccFSxDataRepositoryAssociation_importedFileChunkSizeUpdated(t *testing func TestAccFSxDataRepositoryAssociation_deleteDataInFilesystem(t *testing.T) { ctx := acctest.Context(t) - if acctest.Partition() == endpoints.AwsUsGovPartitionID { - t.Skip("PERSISTENT_2 deployment_type is not supported in GovCloud partition") - } - var association fsx.DataRepositoryAssociation resourceName := "aws_fsx_data_repository_association.test" - bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) fileSystemPath := 
"/test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) + // PERSISTENT_2 deployment_type is not supported in GovCloud partition. + acctest.PreCheckPartitionNot(t, endpoints.AwsUsGovPartitionID) + }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataRepositoryAssociationDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDataRepositoryAssociationConfig_deleteInFilesystem(bucketName, fileSystemPath, "true"), + Config: testAccDataRepositoryAssociationConfig_deleteInFilesystem(rName, rName, fileSystemPath, "true"), Check: resource.ComposeTestCheckFunc( testAccCheckDataRepositoryAssociationExists(ctx, resourceName, &association), resource.TestCheckResourceAttr(resourceName, "delete_data_in_filesystem", "true"), @@ -323,24 +332,25 @@ func TestAccFSxDataRepositoryAssociation_deleteDataInFilesystem(t *testing.T) { func TestAccFSxDataRepositoryAssociation_s3AutoExportPolicy(t *testing.T) { ctx := acctest.Context(t) - if acctest.Partition() == endpoints.AwsUsGovPartitionID { - t.Skip("PERSISTENT_2 deployment_type is not supported in GovCloud partition") - } - var association fsx.DataRepositoryAssociation resourceName := "aws_fsx_data_repository_association.test" - bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) fileSystemPath := "/test" events := []string{"NEW", "CHANGED", "DELETED"} resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) + // PERSISTENT_2 deployment_type is not supported in GovCloud partition. 
+ acctest.PreCheckPartitionNot(t, endpoints.AwsUsGovPartitionID) + }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataRepositoryAssociationDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDataRepositoryAssociationConfig_s3AutoExportPolicy(bucketName, fileSystemPath, events), + Config: testAccDataRepositoryAssociationConfig_s3AutoExportPolicy(rName, rName, fileSystemPath, events), Check: resource.ComposeTestCheckFunc( testAccCheckDataRepositoryAssociationExists(ctx, resourceName, &association), resource.TestCheckResourceAttr(resourceName, "s3.0.auto_export_policy.0.events.0", "NEW"), @@ -360,25 +370,26 @@ func TestAccFSxDataRepositoryAssociation_s3AutoExportPolicy(t *testing.T) { func TestAccFSxDataRepositoryAssociation_s3AutoExportPolicyUpdate(t *testing.T) { ctx := acctest.Context(t) - if acctest.Partition() == endpoints.AwsUsGovPartitionID { - t.Skip("PERSISTENT_2 deployment_type is not supported in GovCloud partition") - } - var association1, association2 fsx.DataRepositoryAssociation resourceName := "aws_fsx_data_repository_association.test" - bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) fileSystemPath := "/test" events1 := []string{"NEW", "CHANGED", "DELETED"} events2 := []string{"NEW"} resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) + // PERSISTENT_2 deployment_type is not supported in GovCloud partition. + acctest.PreCheckPartitionNot(t, endpoints.AwsUsGovPartitionID) + }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataRepositoryAssociationDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDataRepositoryAssociationConfig_s3AutoExportPolicy(bucketName, fileSystemPath, events1), + Config: testAccDataRepositoryAssociationConfig_s3AutoExportPolicy(rName, rName, fileSystemPath, events1), Check: resource.ComposeTestCheckFunc( testAccCheckDataRepositoryAssociationExists(ctx, resourceName, &association1), resource.TestCheckResourceAttr(resourceName, "s3.0.auto_export_policy.0.events.0", "NEW"), @@ -393,7 +404,7 @@ func TestAccFSxDataRepositoryAssociation_s3AutoExportPolicyUpdate(t *testing.T) ImportStateVerifyIgnore: []string{"delete_data_in_filesystem"}, }, { - Config: testAccDataRepositoryAssociationConfig_s3AutoExportPolicy(bucketName, fileSystemPath, events2), + Config: testAccDataRepositoryAssociationConfig_s3AutoExportPolicy(rName, rName, fileSystemPath, events2), Check: resource.ComposeTestCheckFunc( testAccCheckDataRepositoryAssociationExists(ctx, resourceName, &association2), testAccCheckDataRepositoryAssociationNotRecreated(&association1, &association2), @@ -406,24 +417,25 @@ func TestAccFSxDataRepositoryAssociation_s3AutoExportPolicyUpdate(t *testing.T) func TestAccFSxDataRepositoryAssociation_s3AutoImportPolicy(t *testing.T) { ctx := acctest.Context(t) - if acctest.Partition() == endpoints.AwsUsGovPartitionID { - t.Skip("PERSISTENT_2 deployment_type is not supported in GovCloud partition") - } - var association fsx.DataRepositoryAssociation resourceName := "aws_fsx_data_repository_association.test" - bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) fileSystemPath := "/test" events := []string{"NEW", "CHANGED", "DELETED"} resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) + // PERSISTENT_2 deployment_type is not supported in GovCloud partition. + acctest.PreCheckPartitionNot(t, endpoints.AwsUsGovPartitionID) + }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataRepositoryAssociationDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDataRepositoryAssociationConfig_s3AutoImportPolicy(bucketName, fileSystemPath, events), + Config: testAccDataRepositoryAssociationConfig_s3AutoImportPolicy(rName, rName, fileSystemPath, events), Check: resource.ComposeTestCheckFunc( testAccCheckDataRepositoryAssociationExists(ctx, resourceName, &association), resource.TestCheckResourceAttr(resourceName, "s3.0.auto_import_policy.0.events.0", "NEW"), @@ -443,25 +455,26 @@ func TestAccFSxDataRepositoryAssociation_s3AutoImportPolicy(t *testing.T) { func TestAccFSxDataRepositoryAssociation_s3AutoImportPolicyUpdate(t *testing.T) { ctx := acctest.Context(t) - if acctest.Partition() == endpoints.AwsUsGovPartitionID { - t.Skip("PERSISTENT_2 deployment_type is not supported in GovCloud partition") - } - var association1, association2 fsx.DataRepositoryAssociation resourceName := "aws_fsx_data_repository_association.test" - bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) fileSystemPath := "/test" events1 := []string{"NEW", "CHANGED", "DELETED"} events2 := []string{"NEW"} resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) + // PERSISTENT_2 deployment_type is not supported in GovCloud partition. 
+ acctest.PreCheckPartitionNot(t, endpoints.AwsUsGovPartitionID) + }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataRepositoryAssociationDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDataRepositoryAssociationConfig_s3AutoImportPolicy(bucketName, fileSystemPath, events1), + Config: testAccDataRepositoryAssociationConfig_s3AutoImportPolicy(rName, rName, fileSystemPath, events1), Check: resource.ComposeTestCheckFunc( testAccCheckDataRepositoryAssociationExists(ctx, resourceName, &association1), resource.TestCheckResourceAttr(resourceName, "s3.0.auto_import_policy.0.events.0", "NEW"), @@ -476,7 +489,7 @@ func TestAccFSxDataRepositoryAssociation_s3AutoImportPolicyUpdate(t *testing.T) ImportStateVerifyIgnore: []string{"delete_data_in_filesystem"}, }, { - Config: testAccDataRepositoryAssociationConfig_s3AutoImportPolicy(bucketName, fileSystemPath, events2), + Config: testAccDataRepositoryAssociationConfig_s3AutoImportPolicy(rName, rName, fileSystemPath, events2), Check: resource.ComposeTestCheckFunc( testAccCheckDataRepositoryAssociationExists(ctx, resourceName, &association2), testAccCheckDataRepositoryAssociationNotRecreated(&association1, &association2), @@ -489,23 +502,24 @@ func TestAccFSxDataRepositoryAssociation_s3AutoImportPolicyUpdate(t *testing.T) func TestAccFSxDataRepositoryAssociation_s3FullPolicy(t *testing.T) { ctx := acctest.Context(t) - if acctest.Partition() == endpoints.AwsUsGovPartitionID { - t.Skip("PERSISTENT_2 deployment_type is not supported in GovCloud partition") - } - var association fsx.DataRepositoryAssociation resourceName := "aws_fsx_data_repository_association.test" - bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) fileSystemPath := "/test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) + // PERSISTENT_2 deployment_type is not supported in GovCloud partition. 
+ acctest.PreCheckPartitionNot(t, endpoints.AwsUsGovPartitionID) + }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataRepositoryAssociationDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDataRepositoryAssociationConfig_s3FullPolicy(bucketName, fileSystemPath), + Config: testAccDataRepositoryAssociationConfig_s3FullPolicy(rName, rName, fileSystemPath), Check: resource.ComposeTestCheckFunc( testAccCheckDataRepositoryAssociationExists(ctx, resourceName, &association), resource.TestCheckResourceAttr(resourceName, "s3.0.auto_export_policy.0.events.0", "NEW"), @@ -526,25 +540,22 @@ func TestAccFSxDataRepositoryAssociation_s3FullPolicy(t *testing.T) { }) } -func testAccCheckDataRepositoryAssociationExists(ctx context.Context, resourceName string, assoc *fsx.DataRepositoryAssociation) resource.TestCheckFunc { +func testAccCheckDataRepositoryAssociationExists(ctx context.Context, n string, v *fsx.DataRepositoryAssociation) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceName] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Not found: %s", resourceName) + return fmt.Errorf("Not found: %s", n) } conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx) - association, err := tffsx.FindDataRepositoryAssociationByID(ctx, conn, rs.Primary.ID) + output, err := tffsx.FindDataRepositoryAssociationByID(ctx, conn, rs.Primary.ID) + if err != nil { return err } - if association == nil { - return fmt.Errorf("FSx Lustre Data Repository Association (%s) not found", rs.Primary.ID) - } - - *assoc = *association + *v = *output return nil } @@ -555,19 +566,23 @@ func testAccCheckDataRepositoryAssociationDestroy(ctx context.Context) resource. 
conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx) for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_fsx_lustre_file_system" { + if rs.Type != "aws_fsx_data_repository_association" { continue } - filesystem, err := tffsx.FindFileSystemByID(ctx, conn, rs.Primary.ID) + _, err := tffsx.FindDataRepositoryAssociationByID(ctx, conn, rs.Primary.ID) + if tfresource.NotFound(err) { continue } - if filesystem != nil { - return fmt.Errorf("FSx Lustre File System (%s) still exists", rs.Primary.ID) + if err != nil { + return err } + + return fmt.Errorf("FSx for Lustre Data Repository Association %s still exists", rs.Primary.ID) } + return nil } } @@ -592,11 +607,11 @@ func testAccCheckDataRepositoryAssociationRecreated(i, j *fsx.DataRepositoryAsso } } -func testAccDataRepositoryAssociationConfig_s3Bucket(bucketName string) string { - return acctest.ConfigCompose(testAccLustreFileSystemBaseConfig(), fmt.Sprintf(` +func testAccDataRepositoryAssociationConfig_s3Bucket(rName, bucketName string) string { + return acctest.ConfigCompose(testAccLustreFileSystemConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 - subnet_ids = [aws_subnet.test1.id] + subnet_ids = aws_subnet.test[*].id deployment_type = "PERSISTENT_2" per_unit_storage_throughput = 125 } @@ -607,8 +622,8 @@ resource "aws_s3_bucket" "test" { `, bucketName)) } -func testAccDataRepositoryAssociationConfig_fileSystemPath(bucketName, fileSystemPath string) string { - return acctest.ConfigCompose(testAccDataRepositoryAssociationConfig_s3Bucket(bucketName), fmt.Sprintf(` +func testAccDataRepositoryAssociationConfig_fileSystemPath(rName, bucketName, fileSystemPath string) string { + return acctest.ConfigCompose(testAccDataRepositoryAssociationConfig_s3Bucket(rName, bucketName), fmt.Sprintf(` resource "aws_fsx_data_repository_association" "test" { file_system_id = aws_fsx_lustre_file_system.test.id data_repository_path = "s3://%[1]s" @@ -617,9 +632,10 @@ resource "aws_fsx_data_repository_association" "test" { `, bucketName, fileSystemPath)) } -func testAccDataRepositoryAssociationConfig_importedFileChunkSize(bucketName, fileSystemPath string, fileChunkSize int64) string { +func testAccDataRepositoryAssociationConfig_importedFileChunkSize(rName, bucketName, fileSystemPath string, fileChunkSize int64) string { bucketPath := fmt.Sprintf("s3://%s", bucketName) - return acctest.ConfigCompose(testAccDataRepositoryAssociationConfig_s3Bucket(bucketName), fmt.Sprintf(` + + return acctest.ConfigCompose(testAccDataRepositoryAssociationConfig_s3Bucket(rName, bucketName), fmt.Sprintf(` resource "aws_fsx_data_repository_association" "test" { file_system_id = aws_fsx_lustre_file_system.test.id data_repository_path = %[1]q @@ -629,9 +645,9 @@ resource "aws_fsx_data_repository_association" "test" { `, bucketPath, fileSystemPath, fileChunkSize)) } -func testAccDataRepositoryAssociationConfig_deleteInFilesystem(bucketName, fileSystemPath, deleteDataInFilesystem string) string { +func testAccDataRepositoryAssociationConfig_deleteInFilesystem(rName, bucketName, fileSystemPath, deleteDataInFilesystem string) string { bucketPath := fmt.Sprintf("s3://%s", bucketName) - return acctest.ConfigCompose(testAccDataRepositoryAssociationConfig_s3Bucket(bucketName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccDataRepositoryAssociationConfig_s3Bucket(rName, bucketName), fmt.Sprintf(` resource "aws_fsx_data_repository_association" "test" { file_system_id = aws_fsx_lustre_file_system.test.id 
data_repository_path = %[1]q @@ -641,10 +657,10 @@ resource "aws_fsx_data_repository_association" "test" { `, bucketPath, fileSystemPath, deleteDataInFilesystem)) } -func testAccDataRepositoryAssociationConfig_s3AutoExportPolicy(bucketName, fileSystemPath string, events []string) string { +func testAccDataRepositoryAssociationConfig_s3AutoExportPolicy(rName, bucketName, fileSystemPath string, events []string) string { bucketPath := fmt.Sprintf("s3://%s", bucketName) eventsString := strings.Replace(fmt.Sprintf("%q", events), " ", ", ", -1) - return acctest.ConfigCompose(testAccDataRepositoryAssociationConfig_s3Bucket(bucketName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccDataRepositoryAssociationConfig_s3Bucket(rName, bucketName), fmt.Sprintf(` resource "aws_fsx_data_repository_association" "test" { file_system_id = aws_fsx_lustre_file_system.test.id data_repository_path = %[1]q @@ -659,10 +675,10 @@ resource "aws_fsx_data_repository_association" "test" { `, bucketPath, fileSystemPath, eventsString)) } -func testAccDataRepositoryAssociationConfig_s3AutoImportPolicy(bucketName, fileSystemPath string, events []string) string { +func testAccDataRepositoryAssociationConfig_s3AutoImportPolicy(rName, bucketName, fileSystemPath string, events []string) string { bucketPath := fmt.Sprintf("s3://%s", bucketName) eventsString := strings.Replace(fmt.Sprintf("%q", events), " ", ", ", -1) - return acctest.ConfigCompose(testAccDataRepositoryAssociationConfig_s3Bucket(bucketName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccDataRepositoryAssociationConfig_s3Bucket(rName, bucketName), fmt.Sprintf(` resource "aws_fsx_data_repository_association" "test" { file_system_id = aws_fsx_lustre_file_system.test.id data_repository_path = %[1]q @@ -677,9 +693,9 @@ resource "aws_fsx_data_repository_association" "test" { `, bucketPath, fileSystemPath, eventsString)) } -func testAccDataRepositoryAssociationConfig_s3FullPolicy(bucketName, fileSystemPath string) string { +func testAccDataRepositoryAssociationConfig_s3FullPolicy(rName, bucketName, fileSystemPath string) string { bucketPath := fmt.Sprintf("s3://%s", bucketName) - return acctest.ConfigCompose(testAccDataRepositoryAssociationConfig_s3Bucket(bucketName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccDataRepositoryAssociationConfig_s3Bucket(rName, bucketName), fmt.Sprintf(` resource "aws_fsx_data_repository_association" "test" { file_system_id = aws_fsx_lustre_file_system.test.id data_repository_path = %[1]q diff --git a/internal/service/fsx/file_cache.go b/internal/service/fsx/file_cache.go index 4e4100e762d9..62da3a389358 100644 --- a/internal/service/fsx/file_cache.go +++ b/internal/service/fsx/file_cache.go @@ -366,9 +366,8 @@ func resourceFileCacheRead(ctx context.Context, d *schema.ResourceData, meta int d.Set("subnet_ids", aws.StringValueSlice(filecache.SubnetIds)) d.Set("vpc_id", filecache.VpcId) - if err := d.Set("data_repository_association_ids", filecache.DataRepositoryAssociationIds); err != nil { - return create.DiagError(names.FSx, create.ErrActionSetting, ResNameFileCache, d.Id(), err) - } + dataRepositoryAssociationIDs := aws.StringValueSlice(filecache.DataRepositoryAssociationIds) + d.Set("data_repository_association_ids", dataRepositoryAssociationIDs) if err := d.Set("lustre_configuration", flattenFileCacheLustreConfiguration(filecache.LustreConfiguration)); err != nil { return create.DiagError(names.FSx, create.ErrActionSetting, ResNameFileCache, d.Id(), err) } @@ -381,7 +380,7 @@ func resourceFileCacheRead(ctx 
context.Context, d *schema.ResourceData, meta int // Lookup and set Data Repository Associations - dataRepositoryAssociations, _ := findDataRepositoryAssociationsByIDs(ctx, conn, filecache.DataRepositoryAssociationIds) + dataRepositoryAssociations, _ := findDataRepositoryAssociationsByIDs(ctx, conn, dataRepositoryAssociationIDs) defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig @@ -616,3 +615,11 @@ func expandFileCacheLustreMetadataConfiguration(l []interface{}) *fsx.FileCacheL } return req } + +func findDataRepositoryAssociationsByIDs(ctx context.Context, conn *fsx.FSx, ids []string) ([]*fsx.DataRepositoryAssociation, error) { + input := &fsx.DescribeDataRepositoryAssociationsInput{ + AssociationIds: aws.StringSlice(ids), + } + + return findDataRepositoryAssociations(ctx, conn, input) +} diff --git a/internal/service/fsx/find.go b/internal/service/fsx/find.go index f64069248c62..d41b184f14a2 100644 --- a/internal/service/fsx/find.go +++ b/internal/service/fsx/find.go @@ -92,114 +92,6 @@ func findFileCacheByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.File return fileCaches[0], nil } -func findDataRepositoryAssociationsByIDs(ctx context.Context, conn *fsx.FSx, ids []*string) ([]*fsx.DataRepositoryAssociation, error) { - input := &fsx.DescribeDataRepositoryAssociationsInput{ - AssociationIds: ids, - } - var dataRepositoryAssociations []*fsx.DataRepositoryAssociation - - err := conn.DescribeDataRepositoryAssociationsPagesWithContext(ctx, input, func(page *fsx.DescribeDataRepositoryAssociationsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - dataRepositoryAssociations = append(dataRepositoryAssociations, page.Associations...) - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, fsx.ErrCodeDataRepositoryAssociationNotFound) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - if err != nil { - return nil, err - } - if len(dataRepositoryAssociations) == 0 || dataRepositoryAssociations[0] == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return dataRepositoryAssociations, nil -} - -func FindFileSystemByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.FileSystem, error) { - input := &fsx.DescribeFileSystemsInput{ - FileSystemIds: []*string{aws.String(id)}, - } - - var filesystems []*fsx.FileSystem - - err := conn.DescribeFileSystemsPagesWithContext(ctx, input, func(page *fsx.DescribeFileSystemsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - filesystems = append(filesystems, page.FileSystems...) 
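The hand-rolled length checks in the finders being removed here are exactly what `tfresource.AssertSinglePtrResult`, used by the new finder earlier in this diff, factors out. Roughly (a sketch of the helper's shape as it might live in the `tfresource` package, assumed rather than copied; `NewEmptyResultError` and `NewTooManyResultsError` are the same constructors the removed code calls):

```go
// AssertSinglePtrResult returns the single non-nil element of a slice, or an
// empty-result/too-many-results error. Sketch only; the real helper may differ.
func AssertSinglePtrResult[T any](a []*T) (*T, error) {
	switch l := len(a); l {
	case 0:
		return nil, NewEmptyResultError(nil)
	case 1:
		if a[0] == nil {
			return nil, NewEmptyResultError(nil)
		}
		return a[0], nil
	default:
		return nil, NewTooManyResultsError(l, nil)
	}
}
```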
- - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, fsx.ErrCodeFileSystemNotFound) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if len(filesystems) == 0 || filesystems[0] == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - if count := len(filesystems); count > 1 { - return nil, tfresource.NewTooManyResultsError(count, input) - } - - return filesystems[0], nil -} - -func FindDataRepositoryAssociationByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.DataRepositoryAssociation, error) { - input := &fsx.DescribeDataRepositoryAssociationsInput{ - AssociationIds: []*string{aws.String(id)}, - } - - var associations []*fsx.DataRepositoryAssociation - - err := conn.DescribeDataRepositoryAssociationsPagesWithContext(ctx, input, func(page *fsx.DescribeDataRepositoryAssociationsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - associations = append(associations, page.Associations...) - - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, fsx.ErrCodeDataRepositoryAssociationNotFound) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if len(associations) == 0 || associations[0] == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - if count := len(associations); count > 1 { - return nil, tfresource.NewTooManyResultsError(count, input) - } - - return associations[0], nil -} - func FindStorageVirtualMachineByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.StorageVirtualMachine, error) { input := &fsx.DescribeStorageVirtualMachinesInput{ StorageVirtualMachineIds: []*string{aws.String(id)}, @@ -239,45 +131,6 @@ func FindStorageVirtualMachineByID(ctx context.Context, conn *fsx.FSx, id string return storageVirtualMachines[0], nil } -func FindVolumeByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.Volume, error) { - input := &fsx.DescribeVolumesInput{ - VolumeIds: []*string{aws.String(id)}, - } - - var volumes []*fsx.Volume - - err := conn.DescribeVolumesPagesWithContext(ctx, input, func(page *fsx.DescribeVolumesOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - volumes = append(volumes, page.Volumes...) 
- - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, fsx.ErrCodeVolumeNotFound) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if len(volumes) == 0 || volumes[0] == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - if count := len(volumes); count > 1 { - return nil, tfresource.NewTooManyResultsError(count, input) - } - - return volumes[0], nil -} - func FindSnapshotByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.Snapshot, error) { input := &fsx.DescribeSnapshotsInput{ SnapshotIds: aws.StringSlice([]string{id}), diff --git a/internal/service/fsx/lustre_file_system.go b/internal/service/fsx/lustre_file_system.go index d6da7521d9c1..ef52808a5e6c 100644 --- a/internal/service/fsx/lustre_file_system.go +++ b/internal/service/fsx/lustre_file_system.go @@ -18,11 +18,13 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -52,142 +54,54 @@ func ResourceLustreFileSystem() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "backup_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "dns_name": { - Type: schema.TypeString, - Computed: true, - }, - "export_path": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validation.All( - validation.StringLenBetween(3, 900), - validation.StringMatch(regexache.MustCompile(`^s3://`), "must begin with s3://"), - ), - }, - "import_path": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validation.All( - validation.StringLenBetween(3, 900), - validation.StringMatch(regexache.MustCompile(`^s3://`), "must begin with s3://"), - ), + "auto_import_policy": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice(fsx.AutoImportPolicyType_Values(), false), }, - "imported_file_chunk_size": { + "automatic_backup_retention_days": { Type: schema.TypeInt, Optional: true, Computed: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(1, 512000), - }, - "mount_name": { - Type: schema.TypeString, - Computed: true, - }, - "network_interface_ids": { - // As explained in https://docs.aws.amazon.com/fsx/latest/LustreGuide/mounting-on-premises.html, the first - // network_interface_id is the primary one, so ordering matters. Use TypeList instead of TypeSet to preserve it. 
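As the comment above records, the ordering guarantee is the reason this attribute stays a list while the schema entries around it are reshuffled. The design contrast, in schema terms (illustrative, not code from this diff):

```go
// TypeList preserves element order, so callers can rely on index 0 being the
// primary network interface, per the FSx mounting guide.
"network_interface_ids": {
	Type:     schema.TypeList,
	Computed: true,
	Elem:     &schema.Schema{Type: schema.TypeString},
},
// A TypeSet, by contrast, stores elements unordered (keyed by hash), which
// would discard the primary-first ordering.
```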
- Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, + ValidateFunc: validation.IntBetween(0, 90), }, - "owner_id": { + "backup_id": { Type: schema.TypeString, - Computed: true, - }, - "security_group_ids": { - Type: schema.TypeSet, Optional: true, ForceNew: true, - MaxItems: 50, - Elem: &schema.Schema{Type: schema.TypeString}, }, - "storage_capacity": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntAtLeast(1200), - }, - "subnet_ids": { - Type: schema.TypeList, - Required: true, + "copy_tags_to_backups": { + Type: schema.TypeBool, + Optional: true, ForceNew: true, - MinItems: 1, - MaxItems: 1, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - names.AttrTags: tftags.TagsSchema(), - names.AttrTagsAll: tftags.TagsSchemaComputed(), - "vpc_id": { - Type: schema.TypeString, - Computed: true, + Default: false, }, - "weekly_maintenance_start_time": { + "daily_automatic_backup_start_time": { Type: schema.TypeString, Optional: true, Computed: true, ValidateFunc: validation.All( - validation.StringLenBetween(7, 7), - validation.StringMatch(regexache.MustCompile(`^[1-7]:([01]\d|2[0-3]):?([0-5]\d)$`), "must be in the format d:HH:MM"), + validation.StringLenBetween(5, 5), + validation.StringMatch(regexache.MustCompile(`^([01]\d|2[0-3]):?([0-5]\d)$`), "must be in the format HH:MM"), ), }, - "deployment_type": { + "data_compression_type": { Type: schema.TypeString, Optional: true, - ForceNew: true, - Default: fsx.LustreDeploymentTypeScratch1, - ValidateFunc: validation.StringInSlice(fsx.LustreDeploymentType_Values(), false), + ValidateFunc: validation.StringInSlice(fsx.DataCompressionType_Values(), false), + Default: fsx.DataCompressionTypeNone, }, - "kms_key_id": { + "deployment_type": { Type: schema.TypeString, Optional: true, - Computed: true, ForceNew: true, - ValidateFunc: verify.ValidARN, - }, - "per_unit_storage_throughput": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntInSlice([]int{ - 12, - 40, - 50, - 100, - 125, - 200, - 250, - 500, - 1000, - }), - }, - "automatic_backup_retention_days": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ValidateFunc: validation.IntBetween(0, 90), + Default: fsx.LustreDeploymentTypeScratch1, + ValidateFunc: validation.StringInSlice(fsx.LustreDeploymentType_Values(), false), }, - "daily_automatic_backup_start_time": { + "dns_name": { Type: schema.TypeString, - Optional: true, Computed: true, - ValidateFunc: validation.All( - validation.StringLenBetween(5, 5), - validation.StringMatch(regexache.MustCompile(`^([01]\d|2[0-3]):?([0-5]\d)$`), "must be in the format HH:MM"), - ), - }, - "storage_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: fsx.StorageTypeSsd, - ValidateFunc: validation.StringInSlice(fsx.StorageType_Values(), false), }, "drive_cache_type": { Type: schema.TypeString, @@ -195,23 +109,15 @@ func ResourceLustreFileSystem() *schema.Resource { ForceNew: true, ValidateFunc: validation.StringInSlice(fsx.DriveCacheType_Values(), false), }, - "auto_import_policy": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice(fsx.AutoImportPolicyType_Values(), false), - }, - "copy_tags_to_backups": { - Type: schema.TypeBool, + "export_path": { + Type: schema.TypeString, Optional: true, + Computed: true, ForceNew: true, - Default: false, - }, - "data_compression_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: 
validation.StringInSlice(fsx.DataCompressionType_Values(), false), - Default: fsx.DataCompressionTypeNone, + ValidateFunc: validation.All( + validation.StringLenBetween(3, 900), + validation.StringMatch(regexache.MustCompile(`^s3://`), "must begin with s3://"), + ), }, "file_system_type_version": { Type: schema.TypeString, @@ -223,6 +129,29 @@ func ResourceLustreFileSystem() *schema.Resource { validation.StringMatch(regexache.MustCompile(`^[0-9].[0-9]+$`), "must be in format x.y"), ), }, + "import_path": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(3, 900), + validation.StringMatch(regexache.MustCompile(`^s3://`), "must begin with s3://"), + ), + }, + "imported_file_chunk_size": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(1, 512000), + }, + "kms_key_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: verify.ValidARN, + }, "log_configuration": { Type: schema.TypeList, Optional: true, @@ -248,6 +177,37 @@ func ResourceLustreFileSystem() *schema.Resource { }, }, }, + "mount_name": { + Type: schema.TypeString, + Computed: true, + }, + "network_interface_ids": { + // As explained in https://docs.aws.amazon.com/fsx/latest/LustreGuide/mounting-on-premises.html, the first + // network_interface_id is the primary one, so ordering matters. Use TypeList instead of TypeSet to preserve it. + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "per_unit_storage_throughput": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntInSlice([]int{ + 12, + 40, + 50, + 100, + 125, + 200, + 250, + 500, + 1000, + }), + }, "root_squash_configuration": { Type: schema.TypeList, Optional: true, @@ -270,6 +230,48 @@ func ResourceLustreFileSystem() *schema.Resource { }, }, }, + "security_group_ids": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + MaxItems: 50, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "storage_capacity": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(1200), + }, + "storage_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: fsx.StorageTypeSsd, + ValidateFunc: validation.StringInSlice(fsx.StorageType_Values(), false), + }, + "subnet_ids": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), + "vpc_id": { + Type: schema.TypeString, + Computed: true, + }, + "weekly_maintenance_start_time": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.All( + validation.StringLenBetween(7, 7), + validation.StringMatch(regexache.MustCompile(`^[1-7]:([01]\d|2[0-3]):?([0-5]\d)$`), "must be in the format d:HH:MM"), + ), + }, }, CustomizeDiff: customdiff.Sequence( @@ -297,152 +299,207 @@ func resourceLustreFileSystemCreate(ctx context.Context, d *schema.ResourceData, var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) - input := &fsx.CreateFileSystemInput{ + inputC := &fsx.CreateFileSystemInput{ ClientRequestToken: aws.String(id.UniqueId()), FileSystemType: aws.String(fsx.FileSystemTypeLustre), - StorageCapacity: 
aws.Int64(int64(d.Get("storage_capacity").(int))), - StorageType: aws.String(d.Get("storage_type").(string)), - SubnetIds: flex.ExpandStringList(d.Get("subnet_ids").([]interface{})), LustreConfiguration: &fsx.CreateFileSystemLustreConfiguration{ DeploymentType: aws.String(d.Get("deployment_type").(string)), }, - Tags: getTagsIn(ctx), + StorageCapacity: aws.Int64(int64(d.Get("storage_capacity").(int))), + StorageType: aws.String(d.Get("storage_type").(string)), + SubnetIds: flex.ExpandStringList(d.Get("subnet_ids").([]interface{})), + Tags: getTagsIn(ctx), } - - backupInput := &fsx.CreateFileSystemFromBackupInput{ + inputB := &fsx.CreateFileSystemFromBackupInput{ ClientRequestToken: aws.String(id.UniqueId()), - StorageType: aws.String(d.Get("storage_type").(string)), - SubnetIds: flex.ExpandStringList(d.Get("subnet_ids").([]interface{})), LustreConfiguration: &fsx.CreateFileSystemLustreConfiguration{ DeploymentType: aws.String(d.Get("deployment_type").(string)), }, - Tags: getTagsIn(ctx), + StorageType: aws.String(d.Get("storage_type").(string)), + SubnetIds: flex.ExpandStringList(d.Get("subnet_ids").([]interface{})), + Tags: getTagsIn(ctx), } - //Applicable only for TypePersistent1 and TypePersistent2 - if v, ok := d.GetOk("kms_key_id"); ok { - input.KmsKeyId = aws.String(v.(string)) - backupInput.KmsKeyId = aws.String(v.(string)) + if v, ok := d.GetOk("auto_import_policy"); ok { + inputC.LustreConfiguration.AutoImportPolicy = aws.String(v.(string)) + inputB.LustreConfiguration.AutoImportPolicy = aws.String(v.(string)) } if v, ok := d.GetOk("automatic_backup_retention_days"); ok { - input.LustreConfiguration.AutomaticBackupRetentionDays = aws.Int64(int64(v.(int))) - backupInput.LustreConfiguration.AutomaticBackupRetentionDays = aws.Int64(int64(v.(int))) + inputC.LustreConfiguration.AutomaticBackupRetentionDays = aws.Int64(int64(v.(int))) + inputB.LustreConfiguration.AutomaticBackupRetentionDays = aws.Int64(int64(v.(int))) } - if v, ok := d.GetOk("daily_automatic_backup_start_time"); ok { - input.LustreConfiguration.DailyAutomaticBackupStartTime = aws.String(v.(string)) - backupInput.LustreConfiguration.DailyAutomaticBackupStartTime = aws.String(v.(string)) + if v, ok := d.GetOk("copy_tags_to_backups"); ok { + inputC.LustreConfiguration.CopyTagsToBackups = aws.Bool(v.(bool)) + inputB.LustreConfiguration.CopyTagsToBackups = aws.Bool(v.(bool)) } - if v, ok := d.GetOk("export_path"); ok { - input.LustreConfiguration.ExportPath = aws.String(v.(string)) - backupInput.LustreConfiguration.ExportPath = aws.String(v.(string)) + if v, ok := d.GetOk("daily_automatic_backup_start_time"); ok { + inputC.LustreConfiguration.DailyAutomaticBackupStartTime = aws.String(v.(string)) + inputB.LustreConfiguration.DailyAutomaticBackupStartTime = aws.String(v.(string)) } - if v, ok := d.GetOk("import_path"); ok { - input.LustreConfiguration.ImportPath = aws.String(v.(string)) - backupInput.LustreConfiguration.ImportPath = aws.String(v.(string)) + if v, ok := d.GetOk("data_compression_type"); ok { + inputC.LustreConfiguration.DataCompressionType = aws.String(v.(string)) + inputB.LustreConfiguration.DataCompressionType = aws.String(v.(string)) } - if v, ok := d.GetOk("imported_file_chunk_size"); ok { - input.LustreConfiguration.ImportedFileChunkSize = aws.Int64(int64(v.(int))) - backupInput.LustreConfiguration.ImportedFileChunkSize = aws.Int64(int64(v.(int))) + if v, ok := d.GetOk("drive_cache_type"); ok { + inputC.LustreConfiguration.DriveCacheType = aws.String(v.(string)) + 
inputB.LustreConfiguration.DriveCacheType = aws.String(v.(string)) } - if v, ok := d.GetOk("security_group_ids"); ok { - input.SecurityGroupIds = flex.ExpandStringSet(v.(*schema.Set)) - backupInput.SecurityGroupIds = flex.ExpandStringSet(v.(*schema.Set)) + if v, ok := d.GetOk("export_path"); ok { + inputC.LustreConfiguration.ExportPath = aws.String(v.(string)) + inputB.LustreConfiguration.ExportPath = aws.String(v.(string)) } - if v, ok := d.GetOk("weekly_maintenance_start_time"); ok { - input.LustreConfiguration.WeeklyMaintenanceStartTime = aws.String(v.(string)) - backupInput.LustreConfiguration.WeeklyMaintenanceStartTime = aws.String(v.(string)) + if v, ok := d.GetOk("file_system_type_version"); ok { + inputC.FileSystemTypeVersion = aws.String(v.(string)) + inputB.FileSystemTypeVersion = aws.String(v.(string)) } - if v, ok := d.GetOk("per_unit_storage_throughput"); ok { - input.LustreConfiguration.PerUnitStorageThroughput = aws.Int64(int64(v.(int))) - backupInput.LustreConfiguration.PerUnitStorageThroughput = aws.Int64(int64(v.(int))) + if v, ok := d.GetOk("import_path"); ok { + inputC.LustreConfiguration.ImportPath = aws.String(v.(string)) + inputB.LustreConfiguration.ImportPath = aws.String(v.(string)) } - if v, ok := d.GetOk("drive_cache_type"); ok { - input.LustreConfiguration.DriveCacheType = aws.String(v.(string)) - backupInput.LustreConfiguration.DriveCacheType = aws.String(v.(string)) + if v, ok := d.GetOk("imported_file_chunk_size"); ok { + inputC.LustreConfiguration.ImportedFileChunkSize = aws.Int64(int64(v.(int))) + inputB.LustreConfiguration.ImportedFileChunkSize = aws.Int64(int64(v.(int))) } - if v, ok := d.GetOk("auto_import_policy"); ok { - input.LustreConfiguration.AutoImportPolicy = aws.String(v.(string)) - backupInput.LustreConfiguration.AutoImportPolicy = aws.String(v.(string)) + // Applicable only for TypePersistent1 and TypePersistent2. 
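On the comment above, which documents the kms_key_id block that follows: per the FSx documentation, scratch file systems are encrypted with service-managed keys, so a customer-managed kms_key_id only takes effect for the persistent deployment types. A hedged sketch of how that constraint could be surfaced at plan time instead of at the API; the helper name, its wiring into CustomizeDiff, and the error text are assumptions, not part of this change:

```go
// Illustrative only; not part of this diff.
package sketch

import (
	"fmt"
	"strings"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// validateLustreKMSKeyUsage could be called from CustomizeDiff to reject a
// kms_key_id on SCRATCH_1/SCRATCH_2 deployments before the API does.
func validateLustreKMSKeyUsage(d *schema.ResourceDiff) error {
	if _, ok := d.GetOk("kms_key_id"); ok {
		if dt := d.Get("deployment_type").(string); strings.HasPrefix(dt, "SCRATCH") {
			return fmt.Errorf("kms_key_id requires deployment_type PERSISTENT_1 or PERSISTENT_2, got %s", dt)
		}
	}
	return nil
}
```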
+ if v, ok := d.GetOk("kms_key_id"); ok { + inputC.KmsKeyId = aws.String(v.(string)) + inputB.KmsKeyId = aws.String(v.(string)) } - if v, ok := d.GetOk("copy_tags_to_backups"); ok { - input.LustreConfiguration.CopyTagsToBackups = aws.Bool(v.(bool)) - backupInput.LustreConfiguration.CopyTagsToBackups = aws.Bool(v.(bool)) + if v, ok := d.GetOk("log_configuration"); ok && len(v.([]interface{})) > 0 { + inputC.LustreConfiguration.LogConfiguration = expandLustreLogCreateConfiguration(v.([]interface{})) + inputB.LustreConfiguration.LogConfiguration = expandLustreLogCreateConfiguration(v.([]interface{})) } - if v, ok := d.GetOk("data_compression_type"); ok { - input.LustreConfiguration.DataCompressionType = aws.String(v.(string)) - backupInput.LustreConfiguration.DataCompressionType = aws.String(v.(string)) + if v, ok := d.GetOk("per_unit_storage_throughput"); ok { + inputC.LustreConfiguration.PerUnitStorageThroughput = aws.Int64(int64(v.(int))) + inputB.LustreConfiguration.PerUnitStorageThroughput = aws.Int64(int64(v.(int))) } - if v, ok := d.GetOk("file_system_type_version"); ok { - input.FileSystemTypeVersion = aws.String(v.(string)) - backupInput.FileSystemTypeVersion = aws.String(v.(string)) + if v, ok := d.GetOk("root_squash_configuration"); ok && len(v.([]interface{})) > 0 { + inputC.LustreConfiguration.RootSquashConfiguration = expandLustreRootSquashConfiguration(v.([]interface{})) + inputB.LustreConfiguration.RootSquashConfiguration = expandLustreRootSquashConfiguration(v.([]interface{})) } - if v, ok := d.GetOk("log_configuration"); ok && len(v.([]interface{})) > 0 { - input.LustreConfiguration.LogConfiguration = expandLustreLogCreateConfiguration(v.([]interface{})) - backupInput.LustreConfiguration.LogConfiguration = expandLustreLogCreateConfiguration(v.([]interface{})) + if v, ok := d.GetOk("security_group_ids"); ok { + inputC.SecurityGroupIds = flex.ExpandStringSet(v.(*schema.Set)) + inputB.SecurityGroupIds = flex.ExpandStringSet(v.(*schema.Set)) } - if v, ok := d.GetOk("root_squash_configuration"); ok && len(v.([]interface{})) > 0 { - input.LustreConfiguration.RootSquashConfiguration = expandLustreRootSquashConfiguration(v.([]interface{})) - backupInput.LustreConfiguration.RootSquashConfiguration = expandLustreRootSquashConfiguration(v.([]interface{})) + if v, ok := d.GetOk("weekly_maintenance_start_time"); ok { + inputC.LustreConfiguration.WeeklyMaintenanceStartTime = aws.String(v.(string)) + inputB.LustreConfiguration.WeeklyMaintenanceStartTime = aws.String(v.(string)) } if v, ok := d.GetOk("backup_id"); ok { - backupInput.BackupId = aws.String(v.(string)) + backupID := v.(string) + inputB.BackupId = aws.String(backupID) - log.Printf("[DEBUG] Creating FSx Lustre File System: %s", backupInput) - result, err := conn.CreateFileSystemFromBackupWithContext(ctx, backupInput) + output, err := conn.CreateFileSystemFromBackupWithContext(ctx, inputB) if err != nil { - return sdkdiag.AppendErrorf(diags, "creating FSx Lustre File System from backup: %s", err) + return sdkdiag.AppendErrorf(diags, "creating FSx for Lustre File System from backup (%s): %s", backupID, err) } - d.SetId(aws.StringValue(result.FileSystem.FileSystemId)) + d.SetId(aws.StringValue(output.FileSystem.FileSystemId)) } else { - log.Printf("[DEBUG] Creating FSx Lustre File System: %s", input) - result, err := conn.CreateFileSystemWithContext(ctx, input) + output, err := conn.CreateFileSystemWithContext(ctx, inputC) if err != nil { - return sdkdiag.AppendErrorf(diags, "creating FSx Lustre File System: %s", err) + return 
sdkdiag.AppendErrorf(diags, "creating FSx for Lustre File System: %s", err) } - d.SetId(aws.StringValue(result.FileSystem.FileSystemId)) + d.SetId(aws.StringValue(output.FileSystem.FileSystemId)) } if _, err := waitFileSystemCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx Lustre File System (%s) create: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for FSx for Lustre File System (%s) create: %s", d.Id(), err) } return append(diags, resourceLustreFileSystemRead(ctx, d, meta)...) } +func resourceLustreFileSystemRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).FSxConn(ctx) + + filesystem, err := FindLustreFileSystemByID(ctx, conn, d.Id()) + + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] FSx for Lustre File System (%s) not found, removing from state", d.Id()) + d.SetId("") + return diags + } + + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading FSx for Lustre File System (%s): %s", d.Id(), err) + } + + lustreConfig := filesystem.LustreConfiguration + if lustreConfig.DataRepositoryConfiguration == nil { + // Initialize an empty structure to simplify d.Set() handling. + lustreConfig.DataRepositoryConfiguration = &fsx.DataRepositoryConfiguration{} + } + + d.Set("arn", filesystem.ResourceARN) + d.Set("auto_import_policy", lustreConfig.DataRepositoryConfiguration.AutoImportPolicy) + d.Set("automatic_backup_retention_days", lustreConfig.AutomaticBackupRetentionDays) + d.Set("copy_tags_to_backups", lustreConfig.CopyTagsToBackups) + d.Set("daily_automatic_backup_start_time", lustreConfig.DailyAutomaticBackupStartTime) + d.Set("data_compression_type", lustreConfig.DataCompressionType) + d.Set("deployment_type", lustreConfig.DeploymentType) + d.Set("dns_name", filesystem.DNSName) + d.Set("drive_cache_type", lustreConfig.DriveCacheType) + d.Set("export_path", lustreConfig.DataRepositoryConfiguration.ExportPath) + d.Set("file_system_type_version", filesystem.FileSystemTypeVersion) + d.Set("import_path", lustreConfig.DataRepositoryConfiguration.ImportPath) + d.Set("imported_file_chunk_size", lustreConfig.DataRepositoryConfiguration.ImportedFileChunkSize) + d.Set("kms_key_id", filesystem.KmsKeyId) + if err := d.Set("log_configuration", flattenLustreLogConfiguration(lustreConfig.LogConfiguration)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting log_configuration: %s", err) + } + d.Set("mount_name", lustreConfig.MountName) + d.Set("network_interface_ids", aws.StringValueSlice(filesystem.NetworkInterfaceIds)) + d.Set("owner_id", filesystem.OwnerId) + d.Set("per_unit_storage_throughput", lustreConfig.PerUnitStorageThroughput) + if err := d.Set("root_squash_configuration", flattenLustreRootSquashConfiguration(lustreConfig.RootSquashConfiguration)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting root_squash_configuration: %s", err) + } + d.Set("storage_capacity", filesystem.StorageCapacity) + d.Set("storage_type", filesystem.StorageType) + d.Set("subnet_ids", aws.StringValueSlice(filesystem.SubnetIds)) + d.Set("vpc_id", filesystem.VpcId) + d.Set("weekly_maintenance_start_time", lustreConfig.WeeklyMaintenanceStartTime) + + setTagsOut(ctx, filesystem.Tags) + + return diags +} + func resourceLustreFileSystemUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := 
meta.(*conns.AWSClient).FSxConn(ctx) - if d.HasChangesExcept("tags_all", "tags") { - var waitAdminAction = false + if d.HasChangesExcept("tags", "tags_all") { + waitAdminAction := false input := &fsx.UpdateFileSystemInput{ ClientRequestToken: aws.String(id.UniqueId()), - FileSystemId: aws.String(d.Id()), LustreConfiguration: &fsx.UpdateFileSystemLustreConfiguration{}, + FileSystemId: aws.String(d.Id()), } - if d.HasChange("weekly_maintenance_start_time") { - input.LustreConfiguration.WeeklyMaintenanceStartTime = aws.String(d.Get("weekly_maintenance_start_time").(string)) + if d.HasChange("auto_import_policy") { + input.LustreConfiguration.AutoImportPolicy = aws.String(d.Get("auto_import_policy").(string)) } if d.HasChange("automatic_backup_retention_days") { @@ -453,14 +510,6 @@ func resourceLustreFileSystemUpdate(ctx context.Context, d *schema.ResourceData, input.LustreConfiguration.DailyAutomaticBackupStartTime = aws.String(d.Get("daily_automatic_backup_start_time").(string)) } - if d.HasChange("auto_import_policy") { - input.LustreConfiguration.AutoImportPolicy = aws.String(d.Get("auto_import_policy").(string)) - } - - if d.HasChange("storage_capacity") { - input.StorageCapacity = aws.Int64(int64(d.Get("storage_capacity").(int))) - } - if v, ok := d.GetOk("data_compression_type"); ok { input.LustreConfiguration.DataCompressionType = aws.String(v.(string)) } @@ -475,18 +524,27 @@ func resourceLustreFileSystemUpdate(ctx context.Context, d *schema.ResourceData, waitAdminAction = true } + if d.HasChange("storage_capacity") { + input.StorageCapacity = aws.Int64(int64(d.Get("storage_capacity").(int))) + } + + if d.HasChange("weekly_maintenance_start_time") { + input.LustreConfiguration.WeeklyMaintenanceStartTime = aws.String(d.Get("weekly_maintenance_start_time").(string)) + } + _, err := conn.UpdateFileSystemWithContext(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "updating FSX Lustre File System (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "updating FSx for Lustre File System (%s): %s", d.Id(), err) } if _, err := waitFileSystemUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx Lustre File System (%s) update: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for FSx for Lustre File System (%s) update: %s", d.Id(), err) } if waitAdminAction { if _, err := waitAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx Lustre File System (%s) Log Configuratio to be updated: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for FSx for Lustre File System (%s) administrative action complete: %s", d.Id(), err) } } } @@ -494,102 +552,25 @@ func resourceLustreFileSystemUpdate(ctx context.Context, d *schema.ResourceData, return append(diags, resourceLustreFileSystemRead(ctx, d, meta)...)
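The update path above sets waitAdminAction for changes that trigger an asynchronous administrative action and then blocks on the FILE_SYSTEM_UPDATE action via waitAdministrativeActionCompleted. A sketch of that waiter's shape for orientation; the function below is illustrative, and the status set is hedged from the fsx SDK's Status values rather than copied from this change:

```go
// Illustrative only; not part of this diff.
package sketch

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go/service/fsx"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
)

// waitAdminActionCompleted polls the supplied refresh function until the
// administrative action reaches a terminal success status or times out.
func waitAdminActionCompleted(ctx context.Context, refresh retry.StateRefreshFunc, timeout time.Duration) error {
	stateConf := &retry.StateChangeConf{
		Pending: []string{fsx.StatusInProgress, fsx.StatusPending},
		Target:  []string{fsx.StatusCompleted, fsx.StatusUpdatedOptimizing},
		Refresh: refresh,
		Timeout: timeout,
	}
	_, err := stateConf.WaitForStateContext(ctx)
	return err
}
```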
} -func resourceLustreFileSystemRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) - - filesystem, err := FindFileSystemByID(ctx, conn, d.Id()) - if !d.IsNewResource() && tfresource.NotFound(err) { - log.Printf("[WARN] FSx Lustre File System (%s) not found, removing from state", d.Id()) - d.SetId("") - return diags - } - - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading FSx Lustre File System (%s): %s", d.Id(), err) - } - - lustreConfig := filesystem.LustreConfiguration - - if filesystem.WindowsConfiguration != nil { - return sdkdiag.AppendErrorf(diags, "expected FSx Lustre File System, found FSx Windows File System: %s", d.Id()) - } - - if lustreConfig == nil { - return sdkdiag.AppendErrorf(diags, "describing FSx Lustre File System (%s): empty Lustre configuration", d.Id()) - } - - if lustreConfig.DataRepositoryConfiguration == nil { - // Initialize an empty structure to simplify d.Set() handling - lustreConfig.DataRepositoryConfiguration = &fsx.DataRepositoryConfiguration{} - } - - d.Set("arn", filesystem.ResourceARN) - d.Set("dns_name", filesystem.DNSName) - d.Set("export_path", lustreConfig.DataRepositoryConfiguration.ExportPath) - d.Set("import_path", lustreConfig.DataRepositoryConfiguration.ImportPath) - d.Set("auto_import_policy", lustreConfig.DataRepositoryConfiguration.AutoImportPolicy) - d.Set("imported_file_chunk_size", lustreConfig.DataRepositoryConfiguration.ImportedFileChunkSize) - d.Set("deployment_type", lustreConfig.DeploymentType) - d.Set("per_unit_storage_throughput", lustreConfig.PerUnitStorageThroughput) - d.Set("mount_name", lustreConfig.MountName) - d.Set("storage_type", filesystem.StorageType) - d.Set("drive_cache_type", lustreConfig.DriveCacheType) - d.Set("kms_key_id", filesystem.KmsKeyId) - - if err := d.Set("network_interface_ids", aws.StringValueSlice(filesystem.NetworkInterfaceIds)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting network_interface_ids: %s", err) - } - - d.Set("owner_id", filesystem.OwnerId) - d.Set("storage_capacity", filesystem.StorageCapacity) - - if err := d.Set("subnet_ids", aws.StringValueSlice(filesystem.SubnetIds)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting subnet_ids: %s", err) - } - - if err := d.Set("log_configuration", flattenLustreLogConfiguration(lustreConfig.LogConfiguration)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting log_configuration: %s", err) - } - - if err := d.Set("root_squash_configuration", flattenLustreRootSquashConfiguration(lustreConfig.RootSquashConfiguration)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting root_squash_configuration: %s", err) - } - - setTagsOut(ctx, filesystem.Tags) - - d.Set("vpc_id", filesystem.VpcId) - d.Set("weekly_maintenance_start_time", lustreConfig.WeeklyMaintenanceStartTime) - d.Set("automatic_backup_retention_days", lustreConfig.AutomaticBackupRetentionDays) - d.Set("daily_automatic_backup_start_time", lustreConfig.DailyAutomaticBackupStartTime) - d.Set("copy_tags_to_backups", lustreConfig.CopyTagsToBackups) - d.Set("data_compression_type", lustreConfig.DataCompressionType) - d.Set("file_system_type_version", filesystem.FileSystemTypeVersion) - - return diags -} - func resourceLustreFileSystemDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) - request := &fsx.DeleteFileSystemInput{ + 
log.Printf("[DEBUG] Deleting FSx for Lustre File System: %s", d.Id()) + _, err := conn.DeleteFileSystemWithContext(ctx, &fsx.DeleteFileSystemInput{ FileSystemId: aws.String(d.Id()), - } - - log.Printf("[DEBUG] Deleting FSx Lustre File System: %s", d.Id()) - _, err := conn.DeleteFileSystemWithContext(ctx, request) + }) if tfawserr.ErrCodeEquals(err, fsx.ErrCodeFileSystemNotFound) { return diags } if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting FSx Lustre File System (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "deleting FSx for Lustre File System (%s): %s", d.Id(), err) } if _, err := waitFileSystemDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx Lustre File System (%s) to deleted: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for FSx for Lustre File System (%s) delete: %s", d.Id(), err) } return diags @@ -679,3 +660,108 @@ func logStateFunc(v interface{}) string { } return value } + +func FindFileSystemByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.FileSystem, error) { + input := &fsx.DescribeFileSystemsInput{ + FileSystemIds: []*string{aws.String(id)}, + } + + var filesystems []*fsx.FileSystem + + err := conn.DescribeFileSystemsPagesWithContext(ctx, input, func(page *fsx.DescribeFileSystemsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + filesystems = append(filesystems, page.FileSystems...) + + return !lastPage + }) + + if tfawserr.ErrCodeEquals(err, fsx.ErrCodeFileSystemNotFound) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if len(filesystems) == 0 || filesystems[0] == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + if count := len(filesystems); count > 1 { + return nil, tfresource.NewTooManyResultsError(count, input) + } + + return filesystems[0], nil +} + +func FindLustreFileSystemByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.FileSystem, error) { + output, err := findFileSystemByIDAndType(ctx, conn, id, fsx.FileSystemTypeLustre) + + if err != nil { + return nil, err + } + + if output.LustreConfiguration == nil { + return nil, tfresource.NewEmptyResultError(nil) + } + + return output, nil +} + +func findFileSystemByIDAndType(ctx context.Context, conn *fsx.FSx, fsID, fsType string) (*fsx.FileSystem, error) { + input := &fsx.DescribeFileSystemsInput{ + FileSystemIds: aws.StringSlice([]string{fsID}), + } + filter := func(fs *fsx.FileSystem) bool { + return aws.StringValue(fs.FileSystemType) == fsType + } + + return findFileSystem(ctx, conn, input, filter) +} + +func findFileSystem(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeFileSystemsInput, filter tfslices.Predicate[*fsx.FileSystem]) (*fsx.FileSystem, error) { + output, err := findFileSystems(ctx, conn, input, filter) + + if err != nil { + return nil, err + } + + return tfresource.AssertSinglePtrResult(output) +} + +func findFileSystems(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeFileSystemsInput, filter tfslices.Predicate[*fsx.FileSystem]) ([]*fsx.FileSystem, error) { + var output []*fsx.FileSystem + + err := conn.DescribeFileSystemsPagesWithContext(ctx, input, func(page *fsx.DescribeFileSystemsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, v := range page.FileSystems { + if v != nil && filter(v) { + output = append(output, v) + } + } + + return !lastPage + }) + + if tfawserr.ErrCodeEquals(err, 
fsx.ErrCodeFileSystemNotFound) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + return output, nil +} diff --git a/internal/service/fsx/lustre_file_system_test.go b/internal/service/fsx/lustre_file_system_test.go index 8952b6393992..459474e0f9b2 100644 --- a/internal/service/fsx/lustre_file_system_test.go +++ b/internal/service/fsx/lustre_file_system_test.go @@ -25,7 +25,7 @@ func TestAccFSxLustreFileSystem_basic(t *testing.T) { ctx := acctest.Context(t) var filesystem fsx.FileSystem resourceName := "aws_fsx_lustre_file_system.test" - + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) deploymentType := fsx.LustreDeploymentTypeScratch1 if acctest.Partition() == endpoints.AwsUsGovPartitionID { deploymentType = fsx.LustreDeploymentTypeScratch2 // SCRATCH_1 not supported in GovCloud @@ -38,30 +38,30 @@ func TestAccFSxLustreFileSystem_basic(t *testing.T) { CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLustreFileSystemConfig_subnetIDs1(), - Check: resource.ComposeTestCheckFunc( + Config: testAccLustreFileSystemConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem), acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "fsx", regexache.MustCompile(`file-system/fs-.+`)), + resource.TestCheckResourceAttr(resourceName, "automatic_backup_retention_days", "0"), + resource.TestCheckResourceAttr(resourceName, "copy_tags_to_backups", "false"), + resource.TestCheckResourceAttr(resourceName, "data_compression_type", fsx.DataCompressionTypeNone), + resource.TestCheckResourceAttr(resourceName, "deployment_type", deploymentType), resource.TestMatchResourceAttr(resourceName, "dns_name", regexache.MustCompile(`fs-.+\.fsx\.`)), resource.TestCheckResourceAttr(resourceName, "export_path", ""), resource.TestCheckResourceAttr(resourceName, "import_path", ""), resource.TestCheckResourceAttr(resourceName, "imported_file_chunk_size", "0"), + resource.TestCheckResourceAttr(resourceName, "log_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "log_configuration.0.level", "DISABLED"), resource.TestCheckResourceAttrSet(resourceName, "mount_name"), resource.TestCheckResourceAttr(resourceName, "network_interface_ids.#", "2"), acctest.CheckResourceAttrAccountID(resourceName, "owner_id"), + resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", "0"), resource.TestCheckResourceAttr(resourceName, "storage_capacity", "1200"), + resource.TestCheckResourceAttr(resourceName, "storage_type", fsx.StorageTypeSsd), resource.TestCheckResourceAttr(resourceName, "subnet_ids.#", "1"), - resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", "0"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), resource.TestMatchResourceAttr(resourceName, "vpc_id", regexache.MustCompile(`^vpc-.+`)), resource.TestMatchResourceAttr(resourceName, "weekly_maintenance_start_time", regexache.MustCompile(`^\d:\d\d:\d\d$`)), - resource.TestCheckResourceAttr(resourceName, "deployment_type", deploymentType), - resource.TestCheckResourceAttr(resourceName, "automatic_backup_retention_days", "0"), - resource.TestCheckResourceAttr(resourceName, "storage_type", fsx.StorageTypeSsd), - resource.TestCheckResourceAttr(resourceName, "copy_tags_to_backups", "false"), - resource.TestCheckResourceAttr(resourceName, "data_compression_type", fsx.DataCompressionTypeNone), - 
resource.TestCheckResourceAttr(resourceName, "log_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "log_configuration.0.level", "DISABLED"), ), }, { @@ -78,6 +78,7 @@ func TestAccFSxLustreFileSystem_disappears(t *testing.T) { ctx := acctest.Context(t) var filesystem fsx.FileSystem resourceName := "aws_fsx_lustre_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, @@ -86,7 +87,7 @@ func TestAccFSxLustreFileSystem_disappears(t *testing.T) { CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLustreFileSystemConfig_subnetIDs1(), + Config: testAccLustreFileSystemConfig_basic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem), acctest.CheckResourceDisappears(ctx, acctest.Provider, tffsx.ResourceLustreFileSystem(), resourceName), @@ -101,6 +102,7 @@ func TestAccFSxLustreFileSystem_dataCompression(t *testing.T) { ctx := acctest.Context(t) var filesystem fsx.FileSystem resourceName := "aws_fsx_lustre_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, @@ -109,7 +111,7 @@ func TestAccFSxLustreFileSystem_dataCompression(t *testing.T) { CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLustreFileSystemConfig_compression(), + Config: testAccLustreFileSystemConfig_compression(rName), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem), resource.TestCheckResourceAttr(resourceName, "data_compression_type", fsx.DataCompressionTypeLz4), @@ -122,14 +124,14 @@ func TestAccFSxLustreFileSystem_dataCompression(t *testing.T) { ImportStateVerifyIgnore: []string{"security_group_ids"}, }, { - Config: testAccLustreFileSystemConfig_subnetIDs1(), + Config: testAccLustreFileSystemConfig_basic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem), resource.TestCheckResourceAttr(resourceName, "data_compression_type", fsx.DataCompressionTypeNone), ), }, { - Config: testAccLustreFileSystemConfig_compression(), + Config: testAccLustreFileSystemConfig_compression(rName), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem), resource.TestCheckResourceAttr(resourceName, "data_compression_type", fsx.DataCompressionTypeLz4), @@ -258,6 +260,7 @@ func TestAccFSxLustreFileSystem_securityGroupIDs(t *testing.T) { ctx := acctest.Context(t) var filesystem1, filesystem2 fsx.FileSystem resourceName := "aws_fsx_lustre_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, @@ -266,7 +269,7 @@ func TestAccFSxLustreFileSystem_securityGroupIDs(t *testing.T) { CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLustreFileSystemConfig_securityGroupIDs1(), + Config: testAccLustreFileSystemConfig_securityGroupIDs1(rName), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem1), 
resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", "1"), @@ -279,7 +282,7 @@ func TestAccFSxLustreFileSystem_securityGroupIDs(t *testing.T) { ImportStateVerifyIgnore: []string{"security_group_ids"}, }, { - Config: testAccLustreFileSystemConfig_securityGroupIDs2(), + Config: testAccLustreFileSystemConfig_securityGroupIDs2(rName), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem2), testAccCheckLustreFileSystemRecreated(&filesystem1, &filesystem2), @@ -294,6 +297,7 @@ func TestAccFSxLustreFileSystem_storageCapacity(t *testing.T) { ctx := acctest.Context(t) var filesystem1, filesystem2 fsx.FileSystem resourceName := "aws_fsx_lustre_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, @@ -302,7 +306,7 @@ func TestAccFSxLustreFileSystem_storageCapacity(t *testing.T) { CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLustreFileSystemConfig_storageCapacity(7200), + Config: testAccLustreFileSystemConfig_storageCapacity(rName, 7200), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem1), resource.TestCheckResourceAttr(resourceName, "storage_capacity", "7200"), @@ -315,7 +319,7 @@ func TestAccFSxLustreFileSystem_storageCapacity(t *testing.T) { ImportStateVerifyIgnore: []string{"security_group_ids"}, }, { - Config: testAccLustreFileSystemConfig_storageCapacity(1200), + Config: testAccLustreFileSystemConfig_storageCapacity(rName, 1200), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem2), testAccCheckLustreFileSystemRecreated(&filesystem1, &filesystem2), @@ -330,6 +334,7 @@ func TestAccFSxLustreFileSystem_storageCapacityUpdate(t *testing.T) { ctx := acctest.Context(t) var filesystem1, filesystem2, filesystem3 fsx.FileSystem resourceName := "aws_fsx_lustre_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, @@ -338,7 +343,7 @@ func TestAccFSxLustreFileSystem_storageCapacityUpdate(t *testing.T) { CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLustreFileSystemConfig_storageCapacityScratch2(7200), + Config: testAccLustreFileSystemConfig_storageCapacityScratch2(rName, 7200), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem1), resource.TestCheckResourceAttr(resourceName, "storage_capacity", "7200"), @@ -351,7 +356,7 @@ func TestAccFSxLustreFileSystem_storageCapacityUpdate(t *testing.T) { ImportStateVerifyIgnore: []string{"security_group_ids"}, }, { - Config: testAccLustreFileSystemConfig_storageCapacityScratch2(1200), + Config: testAccLustreFileSystemConfig_storageCapacityScratch2(rName, 1200), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem2), testAccCheckLustreFileSystemRecreated(&filesystem1, &filesystem2), @@ -359,7 +364,7 @@ func TestAccFSxLustreFileSystem_storageCapacityUpdate(t *testing.T) { ), }, { - Config: testAccLustreFileSystemConfig_storageCapacityScratch2(7200), + Config: testAccLustreFileSystemConfig_storageCapacityScratch2(rName, 7200), Check: 
resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem3), testAccCheckLustreFileSystemNotRecreated(&filesystem2, &filesystem3), @@ -374,6 +379,7 @@ func TestAccFSxLustreFileSystem_fileSystemTypeVersion(t *testing.T) { ctx := acctest.Context(t) var filesystem1, filesystem2 fsx.FileSystem resourceName := "aws_fsx_lustre_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, @@ -382,7 +388,7 @@ func TestAccFSxLustreFileSystem_fileSystemTypeVersion(t *testing.T) { CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLustreFileSystemConfig_typeVersion("2.10"), + Config: testAccLustreFileSystemConfig_typeVersion(rName, "2.10"), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem1), resource.TestCheckResourceAttr(resourceName, "file_system_type_version", "2.10"), @@ -395,7 +401,7 @@ func TestAccFSxLustreFileSystem_fileSystemTypeVersion(t *testing.T) { ImportStateVerifyIgnore: []string{"security_group_ids"}, }, { - Config: testAccLustreFileSystemConfig_typeVersion("2.12"), + Config: testAccLustreFileSystemConfig_typeVersion(rName, "2.12"), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem2), testAccCheckLustreFileSystemRecreated(&filesystem1, &filesystem2), @@ -410,6 +416,7 @@ func TestAccFSxLustreFileSystem_tags(t *testing.T) { ctx := acctest.Context(t) var filesystem1, filesystem2, filesystem3 fsx.FileSystem resourceName := "aws_fsx_lustre_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, @@ -418,7 +425,7 @@ func TestAccFSxLustreFileSystem_tags(t *testing.T) { CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLustreFileSystemConfig_tags1("key1", "value1"), + Config: testAccLustreFileSystemConfig_tags1(rName, "key1", "value1"), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem1), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), @@ -432,7 +439,7 @@ func TestAccFSxLustreFileSystem_tags(t *testing.T) { ImportStateVerifyIgnore: []string{"security_group_ids"}, }, { - Config: testAccLustreFileSystemConfig_tags2("key1", "value1updated", "key2", "value2"), + Config: testAccLustreFileSystemConfig_tags2(rName, "key1", "value1updated", "key2", "value2"), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem2), testAccCheckLustreFileSystemNotRecreated(&filesystem1, &filesystem2), @@ -442,7 +449,7 @@ func TestAccFSxLustreFileSystem_tags(t *testing.T) { ), }, { - Config: testAccLustreFileSystemConfig_tags1("key2", "value2"), + Config: testAccLustreFileSystemConfig_tags1(rName, "key2", "value2"), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem3), testAccCheckLustreFileSystemNotRecreated(&filesystem2, &filesystem3), @@ -458,6 +465,7 @@ func TestAccFSxLustreFileSystem_weeklyMaintenanceStartTime(t *testing.T) { ctx := acctest.Context(t) var filesystem1, filesystem2 fsx.FileSystem resourceName := "aws_fsx_lustre_file_system.test" + rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, @@ -466,7 +474,7 @@ func TestAccFSxLustreFileSystem_weeklyMaintenanceStartTime(t *testing.T) { CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLustreFileSystemConfig_weeklyMaintenanceStartTime("1:01:01"), + Config: testAccLustreFileSystemConfig_weeklyMaintenanceStartTime(rName, "1:01:01"), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem1), resource.TestCheckResourceAttr(resourceName, "weekly_maintenance_start_time", "1:01:01"), @@ -479,7 +487,7 @@ func TestAccFSxLustreFileSystem_weeklyMaintenanceStartTime(t *testing.T) { ImportStateVerifyIgnore: []string{"security_group_ids"}, }, { - Config: testAccLustreFileSystemConfig_weeklyMaintenanceStartTime("2:02:02"), + Config: testAccLustreFileSystemConfig_weeklyMaintenanceStartTime(rName, "2:02:02"), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem2), testAccCheckLustreFileSystemNotRecreated(&filesystem1, &filesystem2), @@ -494,6 +502,7 @@ func TestAccFSxLustreFileSystem_automaticBackupRetentionDays(t *testing.T) { ctx := acctest.Context(t) var filesystem1, filesystem2 fsx.FileSystem resourceName := "aws_fsx_lustre_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, @@ -502,7 +511,7 @@ func TestAccFSxLustreFileSystem_automaticBackupRetentionDays(t *testing.T) { CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLustreFileSystemConfig_automaticBackupRetentionDays(90), + Config: testAccLustreFileSystemConfig_automaticBackupRetentionDays(rName, 90), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem1), resource.TestCheckResourceAttr(resourceName, "automatic_backup_retention_days", "90"), @@ -515,7 +524,7 @@ func TestAccFSxLustreFileSystem_automaticBackupRetentionDays(t *testing.T) { ImportStateVerifyIgnore: []string{"security_group_ids"}, }, { - Config: testAccLustreFileSystemConfig_automaticBackupRetentionDays(0), + Config: testAccLustreFileSystemConfig_automaticBackupRetentionDays(rName, 0), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem2), testAccCheckLustreFileSystemNotRecreated(&filesystem1, &filesystem2), @@ -523,7 +532,7 @@ func TestAccFSxLustreFileSystem_automaticBackupRetentionDays(t *testing.T) { ), }, { - Config: testAccLustreFileSystemConfig_automaticBackupRetentionDays(1), + Config: testAccLustreFileSystemConfig_automaticBackupRetentionDays(rName, 1), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem1), resource.TestCheckResourceAttr(resourceName, "automatic_backup_retention_days", "1"), @@ -537,6 +546,7 @@ func TestAccFSxLustreFileSystem_dailyAutomaticBackupStartTime(t *testing.T) { ctx := acctest.Context(t) var filesystem1, filesystem2 fsx.FileSystem resourceName := "aws_fsx_lustre_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, 
fsx.EndpointsID) }, @@ -545,7 +555,7 @@ func TestAccFSxLustreFileSystem_dailyAutomaticBackupStartTime(t *testing.T) { CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLustreFileSystemConfig_dailyAutomaticBackupStartTime("01:01"), + Config: testAccLustreFileSystemConfig_dailyAutomaticBackupStartTime(rName, "01:01"), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem1), resource.TestCheckResourceAttr(resourceName, "daily_automatic_backup_start_time", "01:01"), @@ -558,7 +568,7 @@ func TestAccFSxLustreFileSystem_dailyAutomaticBackupStartTime(t *testing.T) { ImportStateVerifyIgnore: []string{"security_group_ids"}, }, { - Config: testAccLustreFileSystemConfig_dailyAutomaticBackupStartTime("02:02"), + Config: testAccLustreFileSystemConfig_dailyAutomaticBackupStartTime(rName, "02:02"), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem2), testAccCheckLustreFileSystemNotRecreated(&filesystem1, &filesystem2), @@ -573,6 +583,7 @@ func TestAccFSxLustreFileSystem_deploymentTypePersistent1(t *testing.T) { ctx := acctest.Context(t) var filesystem fsx.FileSystem resourceName := "aws_fsx_lustre_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, @@ -581,7 +592,7 @@ func TestAccFSxLustreFileSystem_deploymentTypePersistent1(t *testing.T) { CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLustreFileSystemConfig_persistent1DeploymentType(50), + Config: testAccLustreFileSystemConfig_persistent1DeploymentType(rName, 50), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem), // per_unit_storage_throughput=50 is only available with deployment_type=PERSISTENT_1, so we test both here. @@ -607,6 +618,7 @@ func TestAccFSxLustreFileSystem_deploymentTypePersistent2(t *testing.T) { ctx := acctest.Context(t) var filesystem fsx.FileSystem resourceName := "aws_fsx_lustre_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, @@ -615,7 +627,7 @@ func TestAccFSxLustreFileSystem_deploymentTypePersistent2(t *testing.T) { CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLustreFileSystemConfig_persistent2DeploymentType(125), + Config: testAccLustreFileSystemConfig_persistent2DeploymentType(rName, 125), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem), // per_unit_storage_throughput=125 is only available with deployment_type=PERSISTENT_2, so we test both here. 
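The inline comments in the two persistent-deployment tests above note that each per_unit_storage_throughput value is only valid with one deployment type, which is why the tests pin the pair together. For orientation, a hedged summary of that pairing; the values are drawn from the AWS documentation, not from this diff:

```go
// Illustrative only; consult the FSx for Lustre documentation for the
// authoritative matrix. Values are MB/s per TiB of storage.
package sketch

import "github.com/aws/aws-sdk-go/service/fsx"

var perUnitStorageThroughput = map[string][]int{
	fsx.LustreDeploymentTypePersistent1: {12, 40, 50, 100, 200}, // 12 and 40 require storage_type HDD
	fsx.LustreDeploymentTypePersistent2: {125, 250, 500, 1000},
}
```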
@@ -681,6 +693,7 @@ func TestAccFSxLustreFileSystem_rootSquashConfig(t *testing.T) { ctx := acctest.Context(t) var filesystem fsx.FileSystem resourceName := "aws_fsx_lustre_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, @@ -689,7 +702,7 @@ func TestAccFSxLustreFileSystem_rootSquashConfig(t *testing.T) { CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLustreFileSystemConfig_rootSquash("365534:65534"), + Config: testAccLustreFileSystemConfig_rootSquash(rName, "365534:65534"), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem), resource.TestCheckResourceAttr(resourceName, "root_squash_configuration.#", "1"), @@ -703,7 +716,7 @@ func TestAccFSxLustreFileSystem_rootSquashConfig(t *testing.T) { ImportStateVerifyIgnore: []string{"security_group_ids"}, }, { - Config: testAccLustreFileSystemConfig_rootSquash("355534:64534"), + Config: testAccLustreFileSystemConfig_rootSquash(rName, "355534:64534"), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem), resource.TestCheckResourceAttr(resourceName, "root_squash_configuration.#", "1"), @@ -718,6 +731,7 @@ func TestAccFSxLustreFileSystem_fromBackup(t *testing.T) { ctx := acctest.Context(t) var filesystem fsx.FileSystem resourceName := "aws_fsx_lustre_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, @@ -726,7 +740,7 @@ func TestAccFSxLustreFileSystem_fromBackup(t *testing.T) { CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLustreFileSystemConfig_fromBackup(), + Config: testAccLustreFileSystemConfig_fromBackup(rName), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem), resource.TestCheckResourceAttr(resourceName, "per_unit_storage_throughput", "50"), @@ -750,6 +764,7 @@ func TestAccFSxLustreFileSystem_kmsKeyID(t *testing.T) { resourceName := "aws_fsx_lustre_file_system.test" kmsKeyResourceName1 := "aws_kms_key.test1" kmsKeyResourceName2 := "aws_kms_key.test2" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, @@ -758,7 +773,7 @@ func TestAccFSxLustreFileSystem_kmsKeyID(t *testing.T) { CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLustreFileSystemConfig_kmsKeyID1(), + Config: testAccLustreFileSystemConfig_kmsKeyID1(rName), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem1), resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.LustreDeploymentTypePersistent1), @@ -772,7 +787,7 @@ func TestAccFSxLustreFileSystem_kmsKeyID(t *testing.T) { ImportStateVerifyIgnore: []string{"security_group_ids"}, }, { - Config: testAccLustreFileSystemConfig_kmsKeyID2(), + Config: testAccLustreFileSystemConfig_kmsKeyID2(rName), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem2), resource.TestCheckResourceAttr(resourceName, 
"deployment_type", fsx.LustreDeploymentTypePersistent1), @@ -788,6 +803,7 @@ func TestAccFSxLustreFileSystem_deploymentTypeScratch2(t *testing.T) { ctx := acctest.Context(t) var filesystem fsx.FileSystem resourceName := "aws_fsx_lustre_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, @@ -796,7 +812,7 @@ func TestAccFSxLustreFileSystem_deploymentTypeScratch2(t *testing.T) { CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLustreFileSystemConfig_deploymentType(fsx.LustreDeploymentTypeScratch2), + Config: testAccLustreFileSystemConfig_deploymentType(rName, fsx.LustreDeploymentTypeScratch2), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem), resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.LustreDeploymentTypeScratch2), @@ -818,6 +834,7 @@ func TestAccFSxLustreFileSystem_storageTypeHddDriveCacheRead(t *testing.T) { ctx := acctest.Context(t) var filesystem fsx.FileSystem resourceName := "aws_fsx_lustre_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, @@ -826,7 +843,7 @@ func TestAccFSxLustreFileSystem_storageTypeHddDriveCacheRead(t *testing.T) { CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLustreFileSystemConfig_hddStorageType(fsx.DriveCacheTypeRead), + Config: testAccLustreFileSystemConfig_hddStorageType(rName, fsx.DriveCacheTypeRead), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem), resource.TestCheckResourceAttr(resourceName, "storage_type", fsx.StorageTypeHdd), @@ -847,6 +864,7 @@ func TestAccFSxLustreFileSystem_storageTypeHddDriveCacheNone(t *testing.T) { ctx := acctest.Context(t) var filesystem fsx.FileSystem resourceName := "aws_fsx_lustre_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, @@ -855,7 +873,7 @@ func TestAccFSxLustreFileSystem_storageTypeHddDriveCacheNone(t *testing.T) { CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLustreFileSystemConfig_hddStorageType(fsx.DriveCacheTypeNone), + Config: testAccLustreFileSystemConfig_hddStorageType(rName, fsx.DriveCacheTypeNone), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem), resource.TestCheckResourceAttr(resourceName, "storage_type", fsx.StorageTypeHdd), @@ -876,6 +894,7 @@ func TestAccFSxLustreFileSystem_copyTagsToBackups(t *testing.T) { ctx := acctest.Context(t) var filesystem fsx.FileSystem resourceName := "aws_fsx_lustre_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, @@ -884,7 +903,7 @@ func TestAccFSxLustreFileSystem_copyTagsToBackups(t *testing.T) { CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), Steps: []resource.TestStep{ { - Config: 
testAccLustreFileSystemConfig_copyTagsToBackups(), + Config: testAccLustreFileSystemConfig_copyTagsToBackups(rName), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem), resource.TestCheckResourceAttr(resourceName, "copy_tags_to_backups", "true"), @@ -936,25 +955,22 @@ func TestAccFSxLustreFileSystem_autoImportPolicy(t *testing.T) { }) } -func testAccCheckLustreFileSystemExists(ctx context.Context, resourceName string, fs *fsx.FileSystem) resource.TestCheckFunc { +func testAccCheckLustreFileSystemExists(ctx context.Context, n string, v *fsx.FileSystem) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceName] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Not found: %s", resourceName) + return fmt.Errorf("Not found: %s", n) } conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx) - filesystem, err := tffsx.FindFileSystemByID(ctx, conn, rs.Primary.ID) + output, err := tffsx.FindLustreFileSystemByID(ctx, conn, rs.Primary.ID) + if err != nil { return err } - if filesystem == nil { - return fmt.Errorf("FSx Lustre File System (%s) not found", rs.Primary.ID) - } - - *fs = *filesystem + *v = *output return nil } @@ -969,15 +985,19 @@ func testAccCheckLustreFileSystemDestroy(ctx context.Context) resource.TestCheck continue } - filesystem, err := tffsx.FindFileSystemByID(ctx, conn, rs.Primary.ID) + _, err := tffsx.FindLustreFileSystemByID(ctx, conn, rs.Primary.ID) + if tfresource.NotFound(err) { continue } - if filesystem != nil { - return fmt.Errorf("FSx Lustre File System (%s) still exists", rs.Primary.ID) + if err != nil { + return err } + + return fmt.Errorf("FSx for Lustre File System %s still exists", rs.Primary.ID) } + return nil } } @@ -985,7 +1005,7 @@ func testAccCheckLustreFileSystemDestroy(ctx context.Context) resource.TestCheck func testAccCheckLustreFileSystemNotRecreated(i, j *fsx.FileSystem) resource.TestCheckFunc { return func(s *terraform.State) error { if aws.StringValue(i.FileSystemId) != aws.StringValue(j.FileSystemId) { - return fmt.Errorf("FSx File System (%s) recreated", aws.StringValue(i.FileSystemId)) + return fmt.Errorf("FSx for Lustre File System (%s) recreated", aws.StringValue(i.FileSystemId)) } return nil @@ -995,31 +1015,31 @@ func testAccCheckLustreFileSystemNotRecreated(i, j *fsx.FileSystem) resource.Tes func testAccCheckLustreFileSystemRecreated(i, j *fsx.FileSystem) resource.TestCheckFunc { return func(s *terraform.State) error { if aws.StringValue(i.FileSystemId) == aws.StringValue(j.FileSystemId) { - return fmt.Errorf("FSx File System (%s) not recreated", aws.StringValue(i.FileSystemId)) + return fmt.Errorf("FSx for Lustre File System (%s) not recreated", aws.StringValue(i.FileSystemId)) } return nil } } -func testAccLustreFileSystemBaseConfig() string { - return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), ` +func testAccLustreFileSystemConfig_base(rName string) string { + return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, 1), ` data "aws_partition" "current" {} - -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" +`) } -resource "aws_subnet" "test1" { - vpc_id = aws_vpc.test.id - cidr_block = "10.0.1.0/24" - availability_zone = data.aws_availability_zones.available.names[0] +func testAccLustreFileSystemConfig_basic(rName string) string { + return acctest.ConfigCompose(testAccLustreFileSystemConfig_base(rName), ` +resource "aws_fsx_lustre_file_system" "test" { + storage_capacity = 1200 + subnet_ids
= aws_subnet.test[*].id + deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? "SCRATCH_2" : null # GovCloud does not support SCRATCH_1 } `) } func testAccLustreFileSystemConfig_exportPath(rName, exportPrefix string) string { - return acctest.ConfigCompose(testAccLustreFileSystemBaseConfig(), fmt.Sprintf(` + return acctest.ConfigCompose(testAccLustreFileSystemConfig_base(rName), fmt.Sprintf(` resource "aws_s3_bucket" "test" { bucket = %[1]q } @@ -1028,14 +1048,18 @@ resource "aws_fsx_lustre_file_system" "test" { export_path = "s3://${aws_s3_bucket.test.bucket}%[2]s" import_path = "s3://${aws_s3_bucket.test.bucket}" storage_capacity = 1200 - subnet_ids = [aws_subnet.test1.id] + subnet_ids = aws_subnet.test[*].id deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? "SCRATCH_2" : null # GovCloud does not support SCRATCH_1 + + tags = { + Name = %[1]q + } } `, rName, exportPrefix)) } func testAccLustreFileSystemConfig_importPath(rName, importPrefix string) string { - return acctest.ConfigCompose(testAccLustreFileSystemBaseConfig(), fmt.Sprintf(` + return acctest.ConfigCompose(testAccLustreFileSystemConfig_base(rName), fmt.Sprintf(` resource "aws_s3_bucket" "test" { bucket = %[1]q } @@ -1043,14 +1067,18 @@ resource "aws_s3_bucket" "test" { resource "aws_fsx_lustre_file_system" "test" { import_path = "s3://${aws_s3_bucket.test.bucket}%[2]s" storage_capacity = 1200 - subnet_ids = [aws_subnet.test1.id] + subnet_ids = aws_subnet.test[*].id deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? "SCRATCH_2" : null # GovCloud does not support SCRATCH_1 + + tags = { + Name = %[1]q + } } `, rName, importPrefix)) } func testAccLustreFileSystemConfig_importedChunkSize(rName string, importedFileChunkSize int) string { - return acctest.ConfigCompose(testAccLustreFileSystemBaseConfig(), fmt.Sprintf(` + return acctest.ConfigCompose(testAccLustreFileSystemConfig_base(rName), fmt.Sprintf(` resource "aws_s3_bucket" "test" { bucket = %[1]q } @@ -1059,17 +1087,21 @@ resource "aws_fsx_lustre_file_system" "test" { import_path = "s3://${aws_s3_bucket.test.bucket}" imported_file_chunk_size = %[2]d storage_capacity = 1200 - subnet_ids = [aws_subnet.test1.id] + subnet_ids = aws_subnet.test[*].id deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? "SCRATCH_2" : null # GovCloud does not support SCRATCH_1 + + tags = { + Name = %[1]q + } } `, rName, importedFileChunkSize)) } -func testAccLustreFileSystemConfig_securityGroupIDs1() string { - return acctest.ConfigCompose(testAccLustreFileSystemBaseConfig(), ` +func testAccLustreFileSystemConfig_securityGroupIDs1(rName string) string { + return acctest.ConfigCompose(testAccLustreFileSystemConfig_base(rName), fmt.Sprintf(` resource "aws_security_group" "test1" { - description = "security group for FSx testing" - vpc_id = aws_vpc.test.id + name = "%[1]s-1" + vpc_id = aws_vpc.test.id ingress { cidr_blocks = [aws_vpc.test.cidr_block] @@ -1084,22 +1116,30 @@ resource "aws_security_group" "test1" { protocol = "-1" to_port = 0 } + + tags = { + Name = %[1]q + } } resource "aws_fsx_lustre_file_system" "test" { security_group_ids = [aws_security_group.test1.id] storage_capacity = 1200 - subnet_ids = [aws_subnet.test1.id] + subnet_ids = aws_subnet.test[*].id deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? 
"SCRATCH_2" : null # GovCloud does not support SCRATCH_1 + + tags = { + Name = %[1]q + } } -`) +`, rName)) } -func testAccLustreFileSystemConfig_securityGroupIDs2() string { - return acctest.ConfigCompose(testAccLustreFileSystemBaseConfig(), ` +func testAccLustreFileSystemConfig_securityGroupIDs2(rName string) string { + return acctest.ConfigCompose(testAccLustreFileSystemConfig_base(rName), fmt.Sprintf(` resource "aws_security_group" "test1" { - description = "security group for FSx testing" - vpc_id = aws_vpc.test.id + name = "%[1]s-1" + vpc_id = aws_vpc.test.id ingress { cidr_blocks = [aws_vpc.test.cidr_block] @@ -1114,11 +1154,15 @@ resource "aws_security_group" "test1" { protocol = "-1" to_port = 0 } + + tags = { + Name = %[1]q + } } resource "aws_security_group" "test2" { - description = "security group for FSx testing" - vpc_id = aws_vpc.test.id + name = "%[1]s-2" + vpc_id = aws_vpc.test.id ingress { cidr_blocks = [aws_vpc.test.cidr_block] @@ -1133,63 +1177,73 @@ resource "aws_security_group" "test2" { protocol = "-1" to_port = 0 } + + tags = { + Name = %[1]q + } } resource "aws_fsx_lustre_file_system" "test" { security_group_ids = [aws_security_group.test1.id, aws_security_group.test2.id] storage_capacity = 1200 - subnet_ids = [aws_subnet.test1.id] + subnet_ids = aws_subnet.test[*].id deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? "SCRATCH_2" : null # GovCloud does not support SCRATCH_1 + + tags = { + Name = %[1]q + } } -`) +`, rName)) } -func testAccLustreFileSystemConfig_typeVersion(fileSystemTypeVersion string) string { - return acctest.ConfigCompose(testAccLustreFileSystemBaseConfig(), fmt.Sprintf(` +func testAccLustreFileSystemConfig_typeVersion(rName, fileSystemTypeVersion string) string { + return acctest.ConfigCompose(testAccLustreFileSystemConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_lustre_file_system" "test" { - file_system_type_version = %[1]q + file_system_type_version = %[2]q storage_capacity = 1200 - subnet_ids = [aws_subnet.test1.id] + subnet_ids = aws_subnet.test[*].id deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? "SCRATCH_2" : null # GovCloud does not support SCRATCH_1 + + tags = { + Name = %[1]q + } } -`, fileSystemTypeVersion)) +`, rName, fileSystemTypeVersion)) } -func testAccLustreFileSystemConfig_storageCapacity(storageCapacity int) string { - return acctest.ConfigCompose(testAccLustreFileSystemBaseConfig(), fmt.Sprintf(` +func testAccLustreFileSystemConfig_storageCapacity(rName string, storageCapacity int) string { + return acctest.ConfigCompose(testAccLustreFileSystemConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_lustre_file_system" "test" { - storage_capacity = %[1]d - subnet_ids = [aws_subnet.test1.id] + storage_capacity = %[2]d + subnet_ids = aws_subnet.test[*].id deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? 
"SCRATCH_2" : null # GovCloud does not support SCRATCH_1 + + tags = { + Name = %[1]q + } } -`, storageCapacity)) +`, rName, storageCapacity)) } -func testAccLustreFileSystemConfig_storageCapacityScratch2(storageCapacity int) string { - return acctest.ConfigCompose(testAccLustreFileSystemBaseConfig(), fmt.Sprintf(` +func testAccLustreFileSystemConfig_storageCapacityScratch2(rName string, storageCapacity int) string { + return acctest.ConfigCompose(testAccLustreFileSystemConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_lustre_file_system" "test" { - storage_capacity = %[1]d - subnet_ids = [aws_subnet.test1.id] + storage_capacity = %[2]d + subnet_ids = aws_subnet.test[*].id deployment_type = "SCRATCH_2" -} -`, storageCapacity)) -} -func testAccLustreFileSystemConfig_subnetIDs1() string { - return acctest.ConfigCompose(testAccLustreFileSystemBaseConfig(), ` -resource "aws_fsx_lustre_file_system" "test" { - storage_capacity = 1200 - subnet_ids = [aws_subnet.test1.id] - deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? "SCRATCH_2" : null # GovCloud does not support SCRATCH_1 + tags = { + Name = %[1]q + } } -`) +`, rName, storageCapacity)) } -func testAccLustreFileSystemConfig_tags1(tagKey1, tagValue1 string) string { - return acctest.ConfigCompose(testAccLustreFileSystemBaseConfig(), fmt.Sprintf(` +func testAccLustreFileSystemConfig_tags1(rName, tagKey1, tagValue1 string) string { + return acctest.ConfigCompose(testAccLustreFileSystemConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 - subnet_ids = [aws_subnet.test1.id] + subnet_ids = aws_subnet.test[*].id deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? "SCRATCH_2" : null # GovCloud does not support SCRATCH_1 tags = { @@ -1199,11 +1253,11 @@ resource "aws_fsx_lustre_file_system" "test" { `, tagKey1, tagValue1)) } -func testAccLustreFileSystemConfig_tags2(tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return acctest.ConfigCompose(testAccLustreFileSystemBaseConfig(), fmt.Sprintf(` +func testAccLustreFileSystemConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return acctest.ConfigCompose(testAccLustreFileSystemConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 - subnet_ids = [aws_subnet.test1.id] + subnet_ids = aws_subnet.test[*].id deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? "SCRATCH_2" : null # GovCloud does not support SCRATCH_1 tags = { @@ -1214,146 +1268,194 @@ resource "aws_fsx_lustre_file_system" "test" { `, tagKey1, tagValue1, tagKey2, tagValue2)) } -func testAccLustreFileSystemConfig_weeklyMaintenanceStartTime(weeklyMaintenanceStartTime string) string { - return acctest.ConfigCompose(testAccLustreFileSystemBaseConfig(), fmt.Sprintf(` +func testAccLustreFileSystemConfig_weeklyMaintenanceStartTime(rName, weeklyMaintenanceStartTime string) string { + return acctest.ConfigCompose(testAccLustreFileSystemConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 - subnet_ids = [aws_subnet.test1.id] - weekly_maintenance_start_time = %[1]q + subnet_ids = aws_subnet.test[*].id + weekly_maintenance_start_time = %[2]q deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? 
"SCRATCH_2" : null # GovCloud does not support SCRATCH_1 + + tags = { + Name = %[1]q + } } -`, weeklyMaintenanceStartTime)) +`, rName, weeklyMaintenanceStartTime)) } -func testAccLustreFileSystemConfig_dailyAutomaticBackupStartTime(dailyAutomaticBackupStartTime string) string { - return acctest.ConfigCompose(testAccLustreFileSystemBaseConfig(), fmt.Sprintf(` +func testAccLustreFileSystemConfig_dailyAutomaticBackupStartTime(rName, dailyAutomaticBackupStartTime string) string { + return acctest.ConfigCompose(testAccLustreFileSystemConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 - subnet_ids = [aws_subnet.test1.id] + subnet_ids = aws_subnet.test[*].id deployment_type = "PERSISTENT_1" per_unit_storage_throughput = 50 - daily_automatic_backup_start_time = %[1]q + daily_automatic_backup_start_time = %[2]q automatic_backup_retention_days = 1 + + tags = { + Name = %[1]q + } } -`, dailyAutomaticBackupStartTime)) +`, rName, dailyAutomaticBackupStartTime)) } -func testAccLustreFileSystemConfig_automaticBackupRetentionDays(retention int) string { - return acctest.ConfigCompose(testAccLustreFileSystemBaseConfig(), fmt.Sprintf(` +func testAccLustreFileSystemConfig_automaticBackupRetentionDays(rName string, retention int) string { + return acctest.ConfigCompose(testAccLustreFileSystemConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 - subnet_ids = [aws_subnet.test1.id] + subnet_ids = aws_subnet.test[*].id deployment_type = "PERSISTENT_1" per_unit_storage_throughput = 50 - automatic_backup_retention_days = %[1]d + automatic_backup_retention_days = %[2]d + + tags = { + Name = %[1]q + } } -`, retention)) +`, rName, retention)) } -func testAccLustreFileSystemConfig_deploymentType(deploymentType string) string { - return acctest.ConfigCompose(testAccLustreFileSystemBaseConfig(), fmt.Sprintf(` +func testAccLustreFileSystemConfig_deploymentType(rName, deploymentType string) string { + return acctest.ConfigCompose(testAccLustreFileSystemConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 - subnet_ids = [aws_subnet.test1.id] - deployment_type = %[1]q + subnet_ids = aws_subnet.test[*].id + deployment_type = %[2]q + + tags = { + Name = %[1]q + } } -`, deploymentType)) +`, rName, deploymentType)) } -func testAccLustreFileSystemConfig_persistent1DeploymentType(perUnitStorageThroughput int) string { - return acctest.ConfigCompose(testAccLustreFileSystemBaseConfig(), fmt.Sprintf(` +func testAccLustreFileSystemConfig_persistent1DeploymentType(rName string, perUnitStorageThroughput int) string { + return acctest.ConfigCompose(testAccLustreFileSystemConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 - subnet_ids = [aws_subnet.test1.id] + subnet_ids = aws_subnet.test[*].id deployment_type = "PERSISTENT_1" - per_unit_storage_throughput = %[1]d + per_unit_storage_throughput = %[2]d + + tags = { + Name = %[1]q + } } -`, perUnitStorageThroughput)) +`, rName, perUnitStorageThroughput)) } -func testAccLustreFileSystemConfig_persistent2DeploymentType(perUnitStorageThroughput int) string { - return acctest.ConfigCompose(testAccLustreFileSystemBaseConfig(), fmt.Sprintf(` +func testAccLustreFileSystemConfig_persistent2DeploymentType(rName string, perUnitStorageThroughput int) string { + return acctest.ConfigCompose(testAccLustreFileSystemConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_lustre_file_system" "test" { 
storage_capacity = 1200 - subnet_ids = [aws_subnet.test1.id] + subnet_ids = aws_subnet.test[*].id deployment_type = "PERSISTENT_2" - per_unit_storage_throughput = %[1]d + per_unit_storage_throughput = %[2]d + + tags = { + Name = %[1]q + } } -`, perUnitStorageThroughput)) +`, rName, perUnitStorageThroughput)) } -func testAccLustreFileSystemConfig_fromBackup() string { - return acctest.ConfigCompose(testAccLustreFileSystemBaseConfig(), ` +func testAccLustreFileSystemConfig_fromBackup(rName string) string { + return acctest.ConfigCompose(testAccLustreFileSystemConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_lustre_file_system" "base" { storage_capacity = 1200 - subnet_ids = [aws_subnet.test1.id] + subnet_ids = aws_subnet.test[*].id deployment_type = "PERSISTENT_1" per_unit_storage_throughput = 50 + + tags = { + Name = %[1]q + } } resource "aws_fsx_backup" "test" { file_system_id = aws_fsx_lustre_file_system.base.id + + tags = { + Name = %[1]q + } } resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 - subnet_ids = [aws_subnet.test1.id] + subnet_ids = aws_subnet.test[*].id deployment_type = "PERSISTENT_1" per_unit_storage_throughput = 50 backup_id = aws_fsx_backup.test.id + + tags = { + Name = %[1]q + } } -`) +`, rName)) } -func testAccLustreFileSystemConfig_kmsKeyID1() string { - return acctest.ConfigCompose(testAccLustreFileSystemBaseConfig(), ` +func testAccLustreFileSystemConfig_kmsKeyID1(rName string) string { + return acctest.ConfigCompose(testAccLustreFileSystemConfig_base(rName), fmt.Sprintf(` resource "aws_kms_key" "test1" { - description = "FSx KMS Testing key" + description = "%[1]s-1" deletion_window_in_days = 7 } resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 - subnet_ids = [aws_subnet.test1.id] + subnet_ids = aws_subnet.test[*].id deployment_type = "PERSISTENT_1" per_unit_storage_throughput = 50 kms_key_id = aws_kms_key.test1.arn + + tags = { + Name = %[1]q + } } -`) +`, rName)) } -func testAccLustreFileSystemConfig_kmsKeyID2() string { - return acctest.ConfigCompose(testAccLustreFileSystemBaseConfig(), ` +func testAccLustreFileSystemConfig_kmsKeyID2(rName string) string { + return acctest.ConfigCompose(testAccLustreFileSystemConfig_base(rName), fmt.Sprintf(` resource "aws_kms_key" "test2" { - description = "FSx KMS Testing key" + description = "%[1]s-2" deletion_window_in_days = 7 } resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 - subnet_ids = [aws_subnet.test1.id] + subnet_ids = aws_subnet.test[*].id deployment_type = "PERSISTENT_1" per_unit_storage_throughput = 50 kms_key_id = aws_kms_key.test2.arn + + tags = { + Name = %[1]q + } } -`) +`, rName)) } -func testAccLustreFileSystemConfig_hddStorageType(drive_cache_type string) string { - return acctest.ConfigCompose(testAccLustreFileSystemBaseConfig(), fmt.Sprintf(` +func testAccLustreFileSystemConfig_hddStorageType(rName, driveCacheType string) string { + return acctest.ConfigCompose(testAccLustreFileSystemConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 6000 - subnet_ids = [aws_subnet.test1.id] + subnet_ids = aws_subnet.test[*].id deployment_type = "PERSISTENT_1" per_unit_storage_throughput = 12 storage_type = "HDD" - drive_cache_type = %[1]q + drive_cache_type = %[2]q + + tags = { + Name = %[1]q + } } -`, drive_cache_type)) +`, rName, driveCacheType)) } func testAccLustreFileSystemConfig_autoImportPolicy(rName, exportPrefix, policy string) string { - return 
acctest.ConfigCompose(testAccLustreFileSystemBaseConfig(), fmt.Sprintf(` + return acctest.ConfigCompose(testAccLustreFileSystemConfig_base(rName), fmt.Sprintf(` resource "aws_s3_bucket" "test" { bucket = %[1]q } @@ -1363,64 +1465,84 @@ resource "aws_fsx_lustre_file_system" "test" { import_path = "s3://${aws_s3_bucket.test.bucket}" auto_import_policy = %[3]q storage_capacity = 1200 - subnet_ids = [aws_subnet.test1.id] + subnet_ids = aws_subnet.test[*].id deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? "SCRATCH_2" : null # GovCloud does not support SCRATCH_1 + + tags = { + Name = %[1]q + } } `, rName, exportPrefix, policy)) } -func testAccLustreFileSystemConfig_copyTagsToBackups() string { - return acctest.ConfigCompose(testAccLustreFileSystemBaseConfig(), ` +func testAccLustreFileSystemConfig_copyTagsToBackups(rName string) string { + return acctest.ConfigCompose(testAccLustreFileSystemConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 deployment_type = "PERSISTENT_1" - subnet_ids = [aws_subnet.test1.id] + subnet_ids = aws_subnet.test[*].id per_unit_storage_throughput = 50 copy_tags_to_backups = true + + tags = { + Name = %[1]q + } } -`) +`, rName)) } -func testAccLustreFileSystemConfig_compression() string { - return acctest.ConfigCompose(testAccLustreFileSystemBaseConfig(), ` +func testAccLustreFileSystemConfig_compression(rName string) string { + return acctest.ConfigCompose(testAccLustreFileSystemConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 - subnet_ids = [aws_subnet.test1.id] + subnet_ids = aws_subnet.test[*].id deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? "SCRATCH_2" : null # GovCloud does not support SCRATCH_1 data_compression_type = "LZ4" + + tags = { + Name = %[1]q + } } -`) +`, rName)) } func testAccLustreFileSystemConfig_log(rName, status string) string { - return acctest.ConfigCompose(testAccLustreFileSystemBaseConfig(), fmt.Sprintf(` + return acctest.ConfigCompose(testAccLustreFileSystemConfig_base(rName), fmt.Sprintf(` resource aws_cloudwatch_log_group "test" { name = "/aws/fsx/%[1]s" } resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 - subnet_ids = [aws_subnet.test1.id] + subnet_ids = aws_subnet.test[*].id deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? "SCRATCH_2" : null # GovCloud does not support SCRATCH_1 log_configuration { destination = aws_cloudwatch_log_group.test.arn level = %[2]q } + + tags = { + Name = %[1]q + } } `, rName, status)) } -func testAccLustreFileSystemConfig_rootSquash(uid string) string { - return acctest.ConfigCompose(testAccLustreFileSystemBaseConfig(), fmt.Sprintf(` +func testAccLustreFileSystemConfig_rootSquash(rName, uid string) string { + return acctest.ConfigCompose(testAccLustreFileSystemConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 - subnet_ids = [aws_subnet.test1.id] + subnet_ids = aws_subnet.test[*].id deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? 
"SCRATCH_2" : null # GovCloud does not support SCRATCH_1 root_squash_configuration { - root_squash = %[1]q + root_squash = %[2]q + } + + tags = { + Name = %[1]q } } -`, uid)) +`, rName, uid)) } diff --git a/internal/service/fsx/openzfs_file_system.go b/internal/service/fsx/openzfs_file_system.go index 5e37ce862184..ea740f392d9d 100644 --- a/internal/service/fsx/openzfs_file_system.go +++ b/internal/service/fsx/openzfs_file_system.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -30,12 +31,12 @@ import ( // @SDKResource("aws_fsx_openzfs_file_system", name="OpenZFS File System") // @Tags(identifierAttribute="arn") -func ResourceOpenzfsFileSystem() *schema.Resource { +func ResourceOpenZFSFileSystem() *schema.Resource { return &schema.Resource{ - CreateWithoutTimeout: resourceOpenzfsFileSystemCreate, - ReadWithoutTimeout: resourceOpenzfsFileSystemRead, - UpdateWithoutTimeout: resourceOpenzfsFileSystemUpdate, - DeleteWithoutTimeout: resourceOpenzfsFileSystemDelete, + CreateWithoutTimeout: resourceOpenZFSFileSystemCreate, + ReadWithoutTimeout: resourceOpenZFSFileSystemRead, + UpdateWithoutTimeout: resourceOpenZFSFileSystemUpdate, + DeleteWithoutTimeout: resourceOpenZFSFileSystemDelete, Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, @@ -109,6 +110,37 @@ func ResourceOpenzfsFileSystem() *schema.Resource { }, }, }, + "dns_name": { + Type: schema.TypeString, + Computed: true, + }, + "endpoint_ip_address_range": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "kms_key_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: verify.ValidARN, + }, + "network_interface_ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "preferred_subnet_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, "root_volume_configuration": { Type: schema.TypeList, Optional: true, @@ -201,30 +233,17 @@ func ResourceOpenzfsFileSystem() *schema.Resource { }, }, }, - "dns_name": { + "root_volume_id": { Type: schema.TypeString, Computed: true, }, - "kms_key_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: verify.ValidARN, - }, - "network_interface_ids": { - Type: schema.TypeList, + "route_table_ids": { + Type: schema.TypeSet, + Optional: true, Computed: true, + MaxItems: 50, Elem: &schema.Schema{Type: schema.TypeString}, }, - "owner_id": { - Type: schema.TypeString, - Computed: true, - }, - "root_volume_id": { - Type: schema.TypeString, - Computed: true, - }, "security_group_ids": { Type: schema.TypeSet, Optional: true, @@ -249,7 +268,6 @@ func ResourceOpenzfsFileSystem() *schema.Resource { Required: true, ForceNew: true, MinItems: 1, - MaxItems: 1, Elem: &schema.Schema{Type: schema.TypeString}, }, names.AttrTags: tftags.TagsSchema(), @@ -278,8 +296,8 @@ func ResourceOpenzfsFileSystem() *schema.Resource { validateDiskConfigurationIOPS, func(_ context.Context, d *schema.ResourceDiff, meta interface{}) error { 
var ( - singleAZ1ThroughputCapacityValues = []int{64, 128, 256, 512, 1024, 2048, 3072, 4096} - singleAZ2ThroughputCapacityValues = []int{160, 320, 640, 1280, 2560, 3840, 5120, 7680, 10240} + singleAZ1ThroughputCapacityValues = []int{64, 128, 256, 512, 1024, 2048, 3072, 4096} + singleAZ2AndMultiAZ1ThroughputCapacityValues = []int{160, 320, 640, 1280, 2560, 3840, 5120, 7680, 10240} ) switch deploymentType, throughputCapacity := d.Get("deployment_type").(string), d.Get("throughput_capacity").(int); deploymentType { @@ -287,9 +305,9 @@ func ResourceOpenzfsFileSystem() *schema.Resource { if !slices.Contains(singleAZ1ThroughputCapacityValues, throughputCapacity) { return fmt.Errorf("%d is not a valid value for `throughput_capacity` when `deployment_type` is %q. Valid values: %v", throughputCapacity, deploymentType, singleAZ1ThroughputCapacityValues) } - case fsx.OpenZFSDeploymentTypeSingleAz2: - if !slices.Contains(singleAZ2ThroughputCapacityValues, throughputCapacity) { - return fmt.Errorf("%d is not a valid value for `throughput_capacity` when `deployment_type` is %q. Valid values: %v", throughputCapacity, deploymentType, singleAZ2ThroughputCapacityValues) + case fsx.OpenZFSDeploymentTypeSingleAz2, fsx.OpenZFSDeploymentTypeMultiAz1: + if !slices.Contains(singleAZ2AndMultiAZ1ThroughputCapacityValues, throughputCapacity) { + return fmt.Errorf("%d is not a valid value for `throughput_capacity` when `deployment_type` is %q. Valid values: %v", throughputCapacity, deploymentType, singleAZ2AndMultiAZ1ThroughputCapacityValues) } // default: // Allow validation to pass for unknown/new types. @@ -325,201 +343,190 @@ func validateDiskConfigurationIOPS(_ context.Context, d *schema.ResourceDiff, me return nil } -func resourceOpenzfsFileSystemCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { +func resourceOpenZFSFileSystemCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) - input := &fsx.CreateFileSystemInput{ + inputC := &fsx.CreateFileSystemInput{ ClientRequestToken: aws.String(id.UniqueId()), FileSystemType: aws.String(fsx.FileSystemTypeOpenzfs), - StorageCapacity: aws.Int64(int64(d.Get("storage_capacity").(int))), - StorageType: aws.String(d.Get("storage_type").(string)), - SubnetIds: flex.ExpandStringList(d.Get("subnet_ids").([]interface{})), OpenZFSConfiguration: &fsx.CreateFileSystemOpenZFSConfiguration{ DeploymentType: aws.String(d.Get("deployment_type").(string)), AutomaticBackupRetentionDays: aws.Int64(int64(d.Get("automatic_backup_retention_days").(int))), }, - Tags: getTagsIn(ctx), + StorageCapacity: aws.Int64(int64(d.Get("storage_capacity").(int))), + StorageType: aws.String(d.Get("storage_type").(string)), + SubnetIds: flex.ExpandStringList(d.Get("subnet_ids").([]interface{})), + Tags: getTagsIn(ctx), } - - backupInput := &fsx.CreateFileSystemFromBackupInput{ + inputB := &fsx.CreateFileSystemFromBackupInput{ ClientRequestToken: aws.String(id.UniqueId()), - StorageType: aws.String(d.Get("storage_type").(string)), - SubnetIds: flex.ExpandStringList(d.Get("subnet_ids").([]interface{})), OpenZFSConfiguration: &fsx.CreateFileSystemOpenZFSConfiguration{ DeploymentType: aws.String(d.Get("deployment_type").(string)), AutomaticBackupRetentionDays: aws.Int64(int64(d.Get("automatic_backup_retention_days").(int))), }, - Tags: getTagsIn(ctx), + StorageType: aws.String(d.Get("storage_type").(string)), + SubnetIds: 
flex.ExpandStringList(d.Get("subnet_ids").([]interface{})), + Tags: getTagsIn(ctx), + } + + if v, ok := d.GetOk("copy_tags_to_backups"); ok { + inputC.OpenZFSConfiguration.CopyTagsToBackups = aws.Bool(v.(bool)) + inputB.OpenZFSConfiguration.CopyTagsToBackups = aws.Bool(v.(bool)) + } + + if v, ok := d.GetOk("copy_tags_to_volumes"); ok { + inputC.OpenZFSConfiguration.CopyTagsToVolumes = aws.Bool(v.(bool)) + inputB.OpenZFSConfiguration.CopyTagsToVolumes = aws.Bool(v.(bool)) + } + + if v, ok := d.GetOk("daily_automatic_backup_start_time"); ok { + inputC.OpenZFSConfiguration.DailyAutomaticBackupStartTime = aws.String(v.(string)) + inputB.OpenZFSConfiguration.DailyAutomaticBackupStartTime = aws.String(v.(string)) } if v, ok := d.GetOk("disk_iops_configuration"); ok { - input.OpenZFSConfiguration.DiskIopsConfiguration = expandOpenzfsFileDiskIopsConfiguration(v.([]interface{})) - backupInput.OpenZFSConfiguration.DiskIopsConfiguration = expandOpenzfsFileDiskIopsConfiguration(v.([]interface{})) + inputC.OpenZFSConfiguration.DiskIopsConfiguration = expandOpenZFSFileDiskIopsConfiguration(v.([]interface{})) + inputB.OpenZFSConfiguration.DiskIopsConfiguration = expandOpenZFSFileDiskIopsConfiguration(v.([]interface{})) } - if v, ok := d.GetOk("root_volume_configuration"); ok { - input.OpenZFSConfiguration.RootVolumeConfiguration = expandOpenzfsRootVolumeConfiguration(v.([]interface{})) - backupInput.OpenZFSConfiguration.RootVolumeConfiguration = expandOpenzfsRootVolumeConfiguration(v.([]interface{})) + if v, ok := d.GetOk("endpoint_ip_address_range"); ok { + inputC.OpenZFSConfiguration.EndpointIpAddressRange = aws.String(v.(string)) + inputB.OpenZFSConfiguration.EndpointIpAddressRange = aws.String(v.(string)) } if v, ok := d.GetOk("kms_key_id"); ok { - input.KmsKeyId = aws.String(v.(string)) - backupInput.KmsKeyId = aws.String(v.(string)) + inputC.KmsKeyId = aws.String(v.(string)) + inputB.KmsKeyId = aws.String(v.(string)) } - if v, ok := d.GetOk("daily_automatic_backup_start_time"); ok { - input.OpenZFSConfiguration.DailyAutomaticBackupStartTime = aws.String(v.(string)) - backupInput.OpenZFSConfiguration.DailyAutomaticBackupStartTime = aws.String(v.(string)) + if v, ok := d.GetOk("preferred_subnet_id"); ok { + inputC.OpenZFSConfiguration.PreferredSubnetId = aws.String(v.(string)) + inputB.OpenZFSConfiguration.PreferredSubnetId = aws.String(v.(string)) } - if v, ok := d.GetOk("security_group_ids"); ok { - input.SecurityGroupIds = flex.ExpandStringSet(v.(*schema.Set)) - backupInput.SecurityGroupIds = flex.ExpandStringSet(v.(*schema.Set)) + if v, ok := d.GetOk("root_volume_configuration"); ok { + inputC.OpenZFSConfiguration.RootVolumeConfiguration = expandOpenZFSRootVolumeConfiguration(v.([]interface{})) + inputB.OpenZFSConfiguration.RootVolumeConfiguration = expandOpenZFSRootVolumeConfiguration(v.([]interface{})) } - if v, ok := d.GetOk("weekly_maintenance_start_time"); ok { - input.OpenZFSConfiguration.WeeklyMaintenanceStartTime = aws.String(v.(string)) - backupInput.OpenZFSConfiguration.WeeklyMaintenanceStartTime = aws.String(v.(string)) + if v, ok := d.GetOk("route_table_ids"); ok { + inputC.OpenZFSConfiguration.RouteTableIds = flex.ExpandStringSet(v.(*schema.Set)) + inputB.OpenZFSConfiguration.RouteTableIds = flex.ExpandStringSet(v.(*schema.Set)) } - if v, ok := d.GetOk("throughput_capacity"); ok { - input.OpenZFSConfiguration.ThroughputCapacity = aws.Int64(int64(v.(int))) - backupInput.OpenZFSConfiguration.ThroughputCapacity = aws.Int64(int64(v.(int))) + if v, ok := d.GetOk("security_group_ids"); ok 
{ + inputC.SecurityGroupIds = flex.ExpandStringSet(v.(*schema.Set)) + inputB.SecurityGroupIds = flex.ExpandStringSet(v.(*schema.Set)) } - if v, ok := d.GetOk("copy_tags_to_backups"); ok { - input.OpenZFSConfiguration.CopyTagsToBackups = aws.Bool(v.(bool)) - backupInput.OpenZFSConfiguration.CopyTagsToBackups = aws.Bool(v.(bool)) + if v, ok := d.GetOk("throughput_capacity"); ok { + inputC.OpenZFSConfiguration.ThroughputCapacity = aws.Int64(int64(v.(int))) + inputB.OpenZFSConfiguration.ThroughputCapacity = aws.Int64(int64(v.(int))) } - if v, ok := d.GetOk("copy_tags_to_volumes"); ok { - input.OpenZFSConfiguration.CopyTagsToVolumes = aws.Bool(v.(bool)) - backupInput.OpenZFSConfiguration.CopyTagsToVolumes = aws.Bool(v.(bool)) + if v, ok := d.GetOk("weekly_maintenance_start_time"); ok { + inputC.OpenZFSConfiguration.WeeklyMaintenanceStartTime = aws.String(v.(string)) + inputB.OpenZFSConfiguration.WeeklyMaintenanceStartTime = aws.String(v.(string)) } if v, ok := d.GetOk("backup_id"); ok { - backupInput.BackupId = aws.String(v.(string)) + backupID := v.(string) + inputB.BackupId = aws.String(backupID) - log.Printf("[DEBUG] Creating FSx OpenZFS File System: %s", backupInput) - result, err := conn.CreateFileSystemFromBackupWithContext(ctx, backupInput) + output, err := conn.CreateFileSystemFromBackupWithContext(ctx, inputB) if err != nil { - return sdkdiag.AppendErrorf(diags, "creating FSx OpenZFS File System from backup: %s", err) + return sdkdiag.AppendErrorf(diags, "creating FSx for OpenZFS File System from backup (%s): %s", backupID, err) } - d.SetId(aws.StringValue(result.FileSystem.FileSystemId)) + d.SetId(aws.StringValue(output.FileSystem.FileSystemId)) } else { - log.Printf("[DEBUG] Creating FSx OpenZFS File System: %s", input) - result, err := conn.CreateFileSystemWithContext(ctx, input) + output, err := conn.CreateFileSystemWithContext(ctx, inputC) if err != nil { - return sdkdiag.AppendErrorf(diags, "creating FSx OpenZFS File System: %s", err) + return sdkdiag.AppendErrorf(diags, "creating FSx for OpenZFS File System: %s", err) } - d.SetId(aws.StringValue(result.FileSystem.FileSystemId)) + d.SetId(aws.StringValue(output.FileSystem.FileSystemId)) } if _, err := waitFileSystemCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx OpenZFS File System (%s) create: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS File System (%s) create: %s", d.Id(), err) } - return append(diags, resourceOpenzfsFileSystemRead(ctx, d, meta)...) + return append(diags, resourceOpenZFSFileSystemRead(ctx, d, meta)...) 
} -func resourceOpenzfsFileSystemRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { +func resourceOpenZFSFileSystemRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) - filesystem, err := FindFileSystemByID(ctx, conn, d.Id()) + filesystem, err := FindOpenZFSFileSystemByID(ctx, conn, d.Id()) + if !d.IsNewResource() && tfresource.NotFound(err) { - log.Printf("[WARN] FSx OpenZFS File System (%s) not found, removing from state", d.Id()) + log.Printf("[WARN] FSx for OpenZFS File System (%s) not found, removing from state", d.Id()) d.SetId("") return diags } if err != nil { - return sdkdiag.AppendErrorf(diags, "reading FSx OpenZFS File System (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "reading FSx for OpenZFS File System (%s): %s", d.Id(), err) } - openzfsConfig := filesystem.OpenZFSConfiguration - - if filesystem.WindowsConfiguration != nil { - return sdkdiag.AppendErrorf(diags, "expected FSx OpenZFS File System, found FSx Windows File System: %s", d.Id()) - } - - if filesystem.LustreConfiguration != nil { - return sdkdiag.AppendErrorf(diags, "expected FSx OpenZFS File System, found FSx Lustre File System: %s", d.Id()) - } - - if filesystem.OntapConfiguration != nil { - return sdkdiag.AppendErrorf(diags, "expected FSx OpeZFS File System, found FSx ONTAP File System: %s", d.Id()) - } - - if openzfsConfig == nil { - return sdkdiag.AppendErrorf(diags, "describing FSx OpenZFS File System (%s): empty Openzfs configuration", d.Id()) - } + openZFSConfig := filesystem.OpenZFSConfiguration d.Set("arn", filesystem.ResourceARN) + d.Set("automatic_backup_retention_days", openZFSConfig.AutomaticBackupRetentionDays) + d.Set("copy_tags_to_backups", openZFSConfig.CopyTagsToBackups) + d.Set("copy_tags_to_volumes", openZFSConfig.CopyTagsToVolumes) + d.Set("daily_automatic_backup_start_time", openZFSConfig.DailyAutomaticBackupStartTime) + d.Set("deployment_type", openZFSConfig.DeploymentType) + if err := d.Set("disk_iops_configuration", flattenOpenZFSFileDiskIopsConfiguration(openZFSConfig.DiskIopsConfiguration)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting disk_iops_configuration: %s", err) + } d.Set("dns_name", filesystem.DNSName) - d.Set("deployment_type", openzfsConfig.DeploymentType) - d.Set("throughput_capacity", openzfsConfig.ThroughputCapacity) - d.Set("storage_type", filesystem.StorageType) + d.Set("endpoint_ip_address_range", openZFSConfig.EndpointIpAddressRange) d.Set("kms_key_id", filesystem.KmsKeyId) - - if err := d.Set("network_interface_ids", aws.StringValueSlice(filesystem.NetworkInterfaceIds)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting network_interface_ids: %s", err) - } - + d.Set("network_interface_ids", aws.StringValueSlice(filesystem.NetworkInterfaceIds)) d.Set("owner_id", filesystem.OwnerId) - d.Set("root_volume_id", openzfsConfig.RootVolumeId) + d.Set("preferred_subnet_id", openZFSConfig.PreferredSubnetId) + rootVolumeID := aws.StringValue(openZFSConfig.RootVolumeId) + d.Set("root_volume_id", rootVolumeID) + d.Set("route_table_ids", aws.StringValueSlice(openZFSConfig.RouteTableIds)) d.Set("storage_capacity", filesystem.StorageCapacity) - - if err := d.Set("subnet_ids", aws.StringValueSlice(filesystem.SubnetIds)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting subnet_ids: %s", err) - } + d.Set("storage_type", filesystem.StorageType) + d.Set("subnet_ids", 
aws.StringValueSlice(filesystem.SubnetIds)) + d.Set("throughput_capacity", openZFSConfig.ThroughputCapacity) + d.Set("vpc_id", filesystem.VpcId) + d.Set("weekly_maintenance_start_time", openZFSConfig.WeeklyMaintenanceStartTime) setTagsOut(ctx, filesystem.Tags) - if err := d.Set("disk_iops_configuration", flattenOpenzfsFileDiskIopsConfiguration(openzfsConfig.DiskIopsConfiguration)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting disk_iops_configuration: %s", err) - } - - d.Set("vpc_id", filesystem.VpcId) - d.Set("weekly_maintenance_start_time", openzfsConfig.WeeklyMaintenanceStartTime) - d.Set("automatic_backup_retention_days", openzfsConfig.AutomaticBackupRetentionDays) - d.Set("daily_automatic_backup_start_time", openzfsConfig.DailyAutomaticBackupStartTime) - d.Set("copy_tags_to_backups", openzfsConfig.CopyTagsToBackups) - d.Set("copy_tags_to_volumes", openzfsConfig.CopyTagsToVolumes) - - rootVolume, err := FindVolumeByID(ctx, conn, *openzfsConfig.RootVolumeId) + rootVolume, err := FindVolumeByID(ctx, conn, rootVolumeID) if err != nil { - return sdkdiag.AppendErrorf(diags, "reading FSx OpenZFS Root Volume Configuration (%s): %s", *openzfsConfig.RootVolumeId, err) + return sdkdiag.AppendErrorf(diags, "reading FSx for OpenZFS File System (%s) root volume (%s): %s", d.Id(), rootVolumeID, err) } - if err := d.Set("root_volume_configuration", flattenOpenzfsRootVolumeConfiguration(rootVolume)); err != nil { + if err := d.Set("root_volume_configuration", flattenOpenZFSRootVolumeConfiguration(rootVolume)); err != nil { return sdkdiag.AppendErrorf(diags, "setting root_volume_configuration: %s", err) } return diags } -func resourceOpenzfsFileSystemUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { +func resourceOpenZFSFileSystemUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) - if d.HasChangesExcept("tags_all", "tags") { + if d.HasChangesExcept("tags", "tags_all") { input := &fsx.UpdateFileSystemInput{ ClientRequestToken: aws.String(id.UniqueId()), FileSystemId: aws.String(d.Id()), OpenZFSConfiguration: &fsx.UpdateFileSystemOpenZFSConfiguration{}, } - if d.HasChange("storage_capacity") { - input.StorageCapacity = aws.Int64(int64(d.Get("storage_capacity").(int))) - } - if d.HasChange("automatic_backup_retention_days") { input.OpenZFSConfiguration.AutomaticBackupRetentionDays = aws.Int64(int64(d.Get("automatic_backup_retention_days").(int))) } @@ -536,12 +543,29 @@ func resourceOpenzfsFileSystemUpdate(ctx context.Context, d *schema.ResourceData input.OpenZFSConfiguration.DailyAutomaticBackupStartTime = aws.String(d.Get("daily_automatic_backup_start_time").(string)) } - if d.HasChange("throughput_capacity") { - input.OpenZFSConfiguration.ThroughputCapacity = aws.Int64(int64(d.Get("throughput_capacity").(int))) + if d.HasChange("disk_iops_configuration") { + input.OpenZFSConfiguration.DiskIopsConfiguration = expandOpenZFSFileDiskIopsConfiguration(d.Get("disk_iops_configuration").([]interface{})) } - if d.HasChange("disk_iops_configuration") { - input.OpenZFSConfiguration.DiskIopsConfiguration = expandOpenzfsFileDiskIopsConfiguration(d.Get("disk_iops_configuration").([]interface{})) + if d.HasChange("route_table_ids") { + o, n := d.GetChange("route_table_ids") + os, ns := o.(*schema.Set), n.(*schema.Set) + add, del := flex.ExpandStringValueSet(ns.Difference(os)), flex.ExpandStringValueSet(os.Difference(ns)) + + if len(add) > 0 { + 
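+				// Newly configured route tables are associated via AddRouteTableIds; removed ones are detached via RemoveRouteTableIds below.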
input.OpenZFSConfiguration.AddRouteTableIds = aws.StringSlice(add) + } + if len(del) > 0 { + input.OpenZFSConfiguration.RemoveRouteTableIds = aws.StringSlice(del) + } + } + + if d.HasChange("storage_capacity") { + input.StorageCapacity = aws.Int64(int64(d.Get("storage_capacity").(int))) + } + + if d.HasChange("throughput_capacity") { + input.OpenZFSConfiguration.ThroughputCapacity = aws.Int64(int64(d.Get("throughput_capacity").(int))) } if d.HasChange("weekly_maintenance_start_time") { @@ -551,46 +575,49 @@ func resourceOpenzfsFileSystemUpdate(ctx context.Context, d *schema.ResourceData _, err := conn.UpdateFileSystemWithContext(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "updating FSx OpenZFS File System (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "updating FSx for OpenZFS File System (%s): %s", d.Id(), err) } if _, err := waitFileSystemUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx OpenZFS File System (%s) update: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS File System (%s) update: %s", d.Id(), err) } if _, err := waitAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx OpenZFS File System (%s) update: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS File System (%s) administrative action complete: %s", d.Id(), err) } if d.HasChange("root_volume_configuration") { + rootVolumeID := d.Get("root_volume_id").(string) input := &fsx.UpdateVolumeInput{ ClientRequestToken: aws.String(id.UniqueId()), - VolumeId: aws.String(d.Get("root_volume_id").(string)), - OpenZFSConfiguration: &fsx.UpdateOpenZFSVolumeConfiguration{}, + OpenZFSConfiguration: expandOpenZFSUpdateRootVolumeConfiguration(d.Get("root_volume_configuration").([]interface{})), + VolumeId: aws.String(rootVolumeID), } - input.OpenZFSConfiguration = expandOpenzfsUpdateRootVolumeConfiguration(d.Get("root_volume_configuration").([]interface{})) - _, err := conn.UpdateVolumeWithContext(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "updating FSx OpenZFS Root Volume (%s): %s", d.Get("root_volume_id").(string), err) + return sdkdiag.AppendErrorf(diags, "updating FSx for OpenZFS Root Volume (%s): %s", rootVolumeID, err) } - if _, err := waitVolumeUpdated(ctx, conn, d.Get("root_volume_id").(string), d.Timeout(schema.TimeoutUpdate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx OpenZFS Root Volume (%s) update: %s", d.Get("root_volume_id").(string), err) + if _, err := waitVolumeUpdated(ctx, conn, rootVolumeID, d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS Root Volume (%s) update: %s", rootVolumeID, err) + } + + if _, err := waitAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeVolumeUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS File System (%s) administrative action complete: %s", d.Id(), err) } } } - return append(diags, resourceOpenzfsFileSystemRead(ctx, d, meta)...) + return append(diags, resourceOpenZFSFileSystemRead(ctx, d, meta)...) 
} -func resourceOpenzfsFileSystemDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { +func resourceOpenZFSFileSystemDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) - log.Printf("[DEBUG] Deleting FSx OpenZFS File System: %s", d.Id()) + log.Printf("[DEBUG] Deleting FSx for OpenZFS File System: %s", d.Id()) _, err := conn.DeleteFileSystemWithContext(ctx, &fsx.DeleteFileSystemInput{ FileSystemId: aws.String(d.Id()), }) @@ -600,17 +627,17 @@ func resourceOpenzfsFileSystemDelete(ctx context.Context, d *schema.ResourceData } if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting FSx OpenZFS File System (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "deleting FSx for OpenZFS File System (%s): %s", d.Id(), err) } if _, err := waitFileSystemDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx OpenZFS File System (%s) delete: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS File System (%s) delete: %s", d.Id(), err) } return diags } -func expandOpenzfsFileDiskIopsConfiguration(cfg []interface{}) *fsx.DiskIopsConfiguration { +func expandOpenZFSFileDiskIopsConfiguration(cfg []interface{}) *fsx.DiskIopsConfiguration { if len(cfg) < 1 { return nil } @@ -630,7 +657,7 @@ func expandOpenzfsFileDiskIopsConfiguration(cfg []interface{}) *fsx.DiskIopsConf return &out } -func expandOpenzfsRootVolumeConfiguration(cfg []interface{}) *fsx.OpenZFSCreateRootVolumeConfiguration { +func expandOpenZFSRootVolumeConfiguration(cfg []interface{}) *fsx.OpenZFSCreateRootVolumeConfiguration { if len(cfg) < 1 { return nil } @@ -656,17 +683,17 @@ func expandOpenzfsRootVolumeConfiguration(cfg []interface{}) *fsx.OpenZFSCreateR } if v, ok := conf["user_and_group_quotas"]; ok { - out.UserAndGroupQuotas = expandOpenzfsUserAndGroupQuotas(v.(*schema.Set).List()) + out.UserAndGroupQuotas = expandOpenZFSUserAndGroupQuotas(v.(*schema.Set).List()) } if v, ok := conf["nfs_exports"].([]interface{}); ok { - out.NfsExports = expandOpenzfsNFSExports(v) + out.NfsExports = expandOpenZFSNFSExports(v) } return &out } -func expandOpenzfsUpdateRootVolumeConfiguration(cfg []interface{}) *fsx.UpdateOpenZFSVolumeConfiguration { +func expandOpenZFSUpdateRootVolumeConfiguration(cfg []interface{}) *fsx.UpdateOpenZFSVolumeConfiguration { if len(cfg) < 1 { return nil } @@ -688,21 +715,21 @@ func expandOpenzfsUpdateRootVolumeConfiguration(cfg []interface{}) *fsx.UpdateOp } if v, ok := conf["user_and_group_quotas"]; ok { - out.UserAndGroupQuotas = expandOpenzfsUserAndGroupQuotas(v.(*schema.Set).List()) + out.UserAndGroupQuotas = expandOpenZFSUserAndGroupQuotas(v.(*schema.Set).List()) } if v, ok := conf["nfs_exports"].([]interface{}); ok { - out.NfsExports = expandOpenzfsNFSExports(v) + out.NfsExports = expandOpenZFSNFSExports(v) } return &out } -func expandOpenzfsUserAndGroupQuotas(cfg []interface{}) []*fsx.OpenZFSUserOrGroupQuota { +func expandOpenZFSUserAndGroupQuotas(cfg []interface{}) []*fsx.OpenZFSUserOrGroupQuota { quotas := []*fsx.OpenZFSUserOrGroupQuota{} for _, quota := range cfg { - expandedQuota := expandOpenzfsUserAndGroupQuota(quota.(map[string]interface{})) + expandedQuota := expandOpenZFSUserAndGroupQuota(quota.(map[string]interface{})) if expandedQuota != nil { quotas = append(quotas, expandedQuota) } @@ -711,7 +738,7 @@ func expandOpenzfsUserAndGroupQuotas(cfg 
[]interface{}) []*fsx.OpenZFSUserOrGrou return quotas } -func expandOpenzfsUserAndGroupQuota(conf map[string]interface{}) *fsx.OpenZFSUserOrGroupQuota { +func expandOpenZFSUserAndGroupQuota(conf map[string]interface{}) *fsx.OpenZFSUserOrGroupQuota { if len(conf) < 1 { return nil } @@ -733,11 +760,11 @@ func expandOpenzfsUserAndGroupQuota(conf map[string]interface{}) *fsx.OpenZFSUse return &out } -func expandOpenzfsNFSExports(cfg []interface{}) []*fsx.OpenZFSNfsExport { +func expandOpenZFSNFSExports(cfg []interface{}) []*fsx.OpenZFSNfsExport { exports := []*fsx.OpenZFSNfsExport{} for _, export := range cfg { - expandedExport := expandOpenzfsNFSExport(export.(map[string]interface{})) + expandedExport := expandOpenZFSNFSExport(export.(map[string]interface{})) if expandedExport != nil { exports = append(exports, expandedExport) } @@ -746,21 +773,21 @@ func expandOpenzfsNFSExports(cfg []interface{}) []*fsx.OpenZFSNfsExport { return exports } -func expandOpenzfsNFSExport(cfg map[string]interface{}) *fsx.OpenZFSNfsExport { +func expandOpenZFSNFSExport(cfg map[string]interface{}) *fsx.OpenZFSNfsExport { out := fsx.OpenZFSNfsExport{} if v, ok := cfg["client_configurations"]; ok { - out.ClientConfigurations = expandOpenzfsClinetConfigurations(v.(*schema.Set).List()) + out.ClientConfigurations = expandOpenZFSClientConfigurations(v.(*schema.Set).List()) } return &out } -func expandOpenzfsClinetConfigurations(cfg []interface{}) []*fsx.OpenZFSClientConfiguration { +func expandOpenZFSClientConfigurations(cfg []interface{}) []*fsx.OpenZFSClientConfiguration { configurations := []*fsx.OpenZFSClientConfiguration{} for _, configuration := range cfg { - expandedConfiguration := expandOpenzfsClientConfiguration(configuration.(map[string]interface{})) + expandedConfiguration := expandOpenZFSClientConfiguration(configuration.(map[string]interface{})) if expandedConfiguration != nil { configurations = append(configurations, expandedConfiguration) } @@ -769,7 +796,7 @@ func expandOpenzfsClinetConfigurations(cfg []interface{}) []*fsx.OpenZFSClientCo return configurations } -func expandOpenzfsClientConfiguration(conf map[string]interface{}) *fsx.OpenZFSClientConfiguration { +func expandOpenZFSClientConfiguration(conf map[string]interface{}) *fsx.OpenZFSClientConfiguration { out := fsx.OpenZFSClientConfiguration{} if v, ok := conf["clients"].(string); ok && len(v) > 0 { @@ -783,7 +810,7 @@ func expandOpenzfsClientConfiguration(conf map[string]interface{}) *fsx.OpenZFSC return &out } -func flattenOpenzfsFileDiskIopsConfiguration(rs *fsx.DiskIopsConfiguration) []interface{} { +func flattenOpenZFSFileDiskIopsConfiguration(rs *fsx.DiskIopsConfiguration) []interface{} { if rs == nil { return []interface{}{} } @@ -799,7 +826,7 @@ func flattenOpenzfsFileDiskIopsConfiguration(rs *fsx.DiskIopsConfiguration) []in return []interface{}{m} } -func flattenOpenzfsRootVolumeConfiguration(rs *fsx.Volume) []interface{} { +func flattenOpenZFSRootVolumeConfiguration(rs *fsx.Volume) []interface{} { if rs == nil { return []interface{}{} } @@ -812,7 +839,7 @@ func flattenOpenzfsRootVolumeConfiguration(rs *fsx.Volume) []interface{} { m["data_compression_type"] = aws.StringValue(rs.OpenZFSConfiguration.DataCompressionType) } if rs.OpenZFSConfiguration.NfsExports != nil { - m["nfs_exports"] = flattenOpenzfsFileNFSExports(rs.OpenZFSConfiguration.NfsExports) + m["nfs_exports"] = flattenOpenZFSFileNFSExports(rs.OpenZFSConfiguration.NfsExports) } if rs.OpenZFSConfiguration.ReadOnly != nil { m["read_only"] =
aws.BoolValue(rs.OpenZFSConfiguration.ReadOnly) @@ -821,19 +848,19 @@ func flattenOpenzfsRootVolumeConfiguration(rs *fsx.Volume) []interface{} { m["record_size_kib"] = aws.Int64Value(rs.OpenZFSConfiguration.RecordSizeKiB) } if rs.OpenZFSConfiguration.UserAndGroupQuotas != nil { - m["user_and_group_quotas"] = flattenOpenzfsFileUserAndGroupQuotas(rs.OpenZFSConfiguration.UserAndGroupQuotas) + m["user_and_group_quotas"] = flattenOpenZFSFileUserAndGroupQuotas(rs.OpenZFSConfiguration.UserAndGroupQuotas) } return []interface{}{m} } -func flattenOpenzfsFileNFSExports(rs []*fsx.OpenZFSNfsExport) []map[string]interface{} { +func flattenOpenZFSFileNFSExports(rs []*fsx.OpenZFSNfsExport) []map[string]interface{} { exports := make([]map[string]interface{}, 0) for _, export := range rs { if export != nil { cfg := make(map[string]interface{}) - cfg["client_configurations"] = flattenOpenzfsClientConfigurations(export.ClientConfigurations) + cfg["client_configurations"] = flattenOpenZFSClientConfigurations(export.ClientConfigurations) exports = append(exports, cfg) } } @@ -845,7 +872,7 @@ func flattenOpenzfsFileNFSExports(rs []*fsx.OpenZFSNfsExport) []map[string]inter return nil } -func flattenOpenzfsClientConfigurations(rs []*fsx.OpenZFSClientConfiguration) []map[string]interface{} { +func flattenOpenZFSClientConfigurations(rs []*fsx.OpenZFSClientConfiguration) []map[string]interface{} { configurations := make([]map[string]interface{}, 0) for _, configuration := range rs { @@ -864,7 +891,7 @@ func flattenOpenzfsClientConfigurations(rs []*fsx.OpenZFSClientConfiguration) [] return nil } -func flattenOpenzfsFileUserAndGroupQuotas(rs []*fsx.OpenZFSUserOrGroupQuota) []map[string]interface{} { +func flattenOpenZFSFileUserAndGroupQuotas(rs []*fsx.OpenZFSUserOrGroupQuota) []map[string]interface{} { quotas := make([]map[string]interface{}, 0) for _, quota := range rs { @@ -883,3 +910,66 @@ func flattenOpenzfsFileUserAndGroupQuotas(rs []*fsx.OpenZFSUserOrGroupQuota) []m return nil } + +func FindOpenZFSFileSystemByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.FileSystem, error) { + output, err := findFileSystemByIDAndType(ctx, conn, id, fsx.FileSystemTypeOpenzfs) + + if err != nil { + return nil, err + } + + if output.OpenZFSConfiguration == nil { + return nil, tfresource.NewEmptyResultError(nil) + } + + return output, nil +} + +func FindVolumeByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.Volume, error) { + input := &fsx.DescribeVolumesInput{ + VolumeIds: aws.StringSlice([]string{id}), + } + + return findVolume(ctx, conn, input) +} + +func findVolume(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeVolumesInput) (*fsx.Volume, error) { + output, err := findVolumes(ctx, conn, input) + + if err != nil { + return nil, err + } + + return tfresource.AssertSinglePtrResult(output) +} + +func findVolumes(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeVolumesInput) ([]*fsx.Volume, error) { + var output []*fsx.Volume + + err := conn.DescribeVolumesPagesWithContext(ctx, input, func(page *fsx.DescribeVolumesOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, v := range page.Volumes { + if v != nil { + output = append(output, v) + } + } + + return !lastPage + }) + + if tfawserr.ErrCodeEquals(err, fsx.ErrCodeVolumeNotFound) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + return output, nil +} diff --git a/internal/service/fsx/openzfs_file_system_test.go 
b/internal/service/fsx/openzfs_file_system_test.go index 2385a8c1a099..7cb6621663f8 100644 --- a/internal/service/fsx/openzfs_file_system_test.go +++ b/internal/service/fsx/openzfs_file_system_test.go @@ -30,7 +30,7 @@ func testAccErrorCheckSkip(t *testing.T) resource.ErrorCheckFunc { ) } -func TestAccFSxOpenzfsFileSystem_basic(t *testing.T) { +func TestAccFSxOpenZFSFileSystem_basic(t *testing.T) { ctx := acctest.Context(t) var filesystem fsx.FileSystem resourceName := "aws_fsx_openzfs_file_system.test" @@ -40,31 +40,28 @@ func TestAccFSxOpenzfsFileSystem_basic(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsFileSystemDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSFileSystemDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSFileSystemConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsFileSystemExists(ctx, resourceName, &filesystem), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem), acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "fsx", regexache.MustCompile(`file-system/fs-.+`)), - resource.TestCheckResourceAttr(resourceName, "network_interface_ids.#", "1"), - acctest.CheckResourceAttrAccountID(resourceName, "owner_id"), - resource.TestCheckResourceAttrSet(resourceName, "root_volume_id"), - resource.TestCheckResourceAttrSet(resourceName, "dns_name"), - resource.TestCheckResourceAttr(resourceName, "storage_capacity", "64"), - resource.TestCheckTypeSetElemAttrPair(resourceName, "subnet_ids.*", "aws_subnet.test.0", "id"), - resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", "0"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), - resource.TestCheckResourceAttrPair(resourceName, "vpc_id", "aws_vpc.test", "id"), - resource.TestMatchResourceAttr(resourceName, "weekly_maintenance_start_time", regexache.MustCompile(`^\d:\d\d:\d\d$`)), - resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.OpenZFSDeploymentTypeSingleAz1), resource.TestCheckResourceAttr(resourceName, "automatic_backup_retention_days", "0"), - resource.TestCheckResourceAttr(resourceName, "storage_type", fsx.StorageTypeSsd), - resource.TestCheckResourceAttrSet(resourceName, "kms_key_id"), - resource.TestCheckResourceAttr(resourceName, "throughput_capacity", "64"), + resource.TestCheckNoResourceAttr(resourceName, "backup_id"), + resource.TestCheckResourceAttr(resourceName, "copy_tags_to_backups", "false"), + resource.TestCheckResourceAttr(resourceName, "copy_tags_to_volumes", "false"), + resource.TestCheckResourceAttr(resourceName, "daily_automatic_backup_start_time", ""), + resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.OpenZFSDeploymentTypeSingleAz1), resource.TestCheckResourceAttr(resourceName, "disk_iops_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "disk_iops_configuration.0.mode", "AUTOMATIC"), resource.TestCheckResourceAttr(resourceName, "disk_iops_configuration.0.iops", "192"), + resource.TestCheckResourceAttr(resourceName, "disk_iops_configuration.0.mode", "AUTOMATIC"), + resource.TestCheckResourceAttrSet(resourceName, "dns_name"), + resource.TestCheckResourceAttr(resourceName, "endpoint_ip_address_range", ""), + resource.TestCheckResourceAttrSet(resourceName, "kms_key_id"), + 
resource.TestCheckResourceAttr(resourceName, "network_interface_ids.#", "1"), + acctest.CheckResourceAttrAccountID(resourceName, "owner_id"), + resource.TestCheckResourceAttr(resourceName, "preferred_subnet_id", ""), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.data_compression_type", "NONE"), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.#", "1"), @@ -76,19 +73,29 @@ func TestAccFSxOpenzfsFileSystem_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.read_only", "false"), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.record_size_kib", "128"), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.#", "2"), + resource.TestCheckResourceAttrSet(resourceName, "root_volume_id"), + resource.TestCheckResourceAttr(resourceName, "route_table_ids.#", "0"), + resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", "0"), + resource.TestCheckResourceAttr(resourceName, "storage_capacity", "64"), + resource.TestCheckResourceAttr(resourceName, "storage_type", fsx.StorageTypeSsd), + resource.TestCheckResourceAttr(resourceName, "subnet_ids.#", "1"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "subnet_ids.*", "aws_subnet.test.0", "id"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "throughput_capacity", "64"), + resource.TestCheckResourceAttrPair(resourceName, "vpc_id", "aws_vpc.test", "id"), + resource.TestMatchResourceAttr(resourceName, "weekly_maintenance_start_time", regexache.MustCompile(`^\d:\d\d:\d\d$`)), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"security_group_ids"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, }, }) } -func TestAccFSxOpenzfsFileSystem_diskIops(t *testing.T) { +func TestAccFSxOpenZFSFileSystem_diskIops(t *testing.T) { ctx := acctest.Context(t) var filesystem fsx.FileSystem resourceName := "aws_fsx_openzfs_file_system.test" @@ -98,12 +105,12 @@ func TestAccFSxOpenzfsFileSystem_diskIops(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsFileSystemDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSFileSystemDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSFileSystemConfig_diskIOPSConfiguration(rName, 192), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsFileSystemExists(ctx, resourceName, &filesystem), + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem), resource.TestCheckResourceAttr(resourceName, "disk_iops_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "disk_iops_configuration.0.mode", "USER_PROVISIONED"), resource.TestCheckResourceAttr(resourceName, "disk_iops_configuration.0.iops", "192"), @@ -118,7 +125,7 @@ func TestAccFSxOpenzfsFileSystem_diskIops(t *testing.T) { { Config: testAccOpenZFSFileSystemConfig_diskIOPSConfiguration(rName, 200), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsFileSystemExists(ctx, resourceName, &filesystem), + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem), 
resource.TestCheckResourceAttr(resourceName, "disk_iops_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "disk_iops_configuration.0.mode", "USER_PROVISIONED"), resource.TestCheckResourceAttr(resourceName, "disk_iops_configuration.0.iops", "200"), @@ -128,7 +135,7 @@ func TestAccFSxOpenzfsFileSystem_diskIops(t *testing.T) { }) } -func TestAccFSxOpenzfsFileSystem_disappears(t *testing.T) { +func TestAccFSxOpenZFSFileSystem_disappears(t *testing.T) { ctx := acctest.Context(t) var filesystem fsx.FileSystem resourceName := "aws_fsx_openzfs_file_system.test" @@ -138,13 +145,13 @@ func TestAccFSxOpenzfsFileSystem_disappears(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsFileSystemDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSFileSystemDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSFileSystemConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsFileSystemExists(ctx, resourceName, &filesystem), - acctest.CheckResourceDisappears(ctx, acctest.Provider, tffsx.ResourceOpenzfsFileSystem(), resourceName), + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tffsx.ResourceOpenZFSFileSystem(), resourceName), ), ExpectNonEmptyPlan: true, }, @@ -152,7 +159,7 @@ func TestAccFSxOpenzfsFileSystem_disappears(t *testing.T) { }) } -func TestAccFSxOpenzfsFileSystem_rootVolume(t *testing.T) { +func TestAccFSxOpenZFSFileSystem_rootVolume(t *testing.T) { ctx := acctest.Context(t) var filesystem1, filesystem2, filesystem3 fsx.FileSystem resourceName := "aws_fsx_openzfs_file_system.test" @@ -162,12 +169,12 @@ func TestAccFSxOpenzfsFileSystem_rootVolume(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsFileSystemDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSFileSystemDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSFileSystemConfig_rootVolume1(rName, "NONE", "false", 128), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsFileSystemExists(ctx, resourceName, &filesystem1), + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem1), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.data_compression_type", "NONE"), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.#", "1"), @@ -178,12 +185,22 @@ func TestAccFSxOpenzfsFileSystem_rootVolume(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.0.client_configurations.0.options.1", "rw"), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.read_only", "false"), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.record_size_kib", "128"), - resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.#", "1"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.#", "3"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, 
"root_volume_configuration.0.user_and_group_quotas.*", map[string]string{ "id": "10", "storage_capacity_quota_gib": "128", "type": "USER", }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "root_volume_configuration.0.user_and_group_quotas.*", map[string]string{ + "id": "0", + "storage_capacity_quota_gib": "0", + "type": "USER", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "root_volume_configuration.0.user_and_group_quotas.*", map[string]string{ + "id": "0", + "storage_capacity_quota_gib": "0", + "type": "GROUP", + }), ), }, { @@ -195,8 +212,8 @@ func TestAccFSxOpenzfsFileSystem_rootVolume(t *testing.T) { { Config: testAccOpenZFSFileSystemConfig_rootVolume2(rName, "ZSTD", "true", 256, 8), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsFileSystemExists(ctx, resourceName, &filesystem2), - testAccCheckOpenzfsFileSystemNotRecreated(&filesystem1, &filesystem2), + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem2), + testAccCheckOpenZFSFileSystemNotRecreated(&filesystem1, &filesystem2), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.data_compression_type", "ZSTD"), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.#", "1"), @@ -207,19 +224,29 @@ func TestAccFSxOpenzfsFileSystem_rootVolume(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.0.client_configurations.0.options.1", "rw"), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.read_only", "true"), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.record_size_kib", "8"), - resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.#", "1"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.#", "3"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "root_volume_configuration.0.user_and_group_quotas.*", map[string]string{ "id": "10", "storage_capacity_quota_gib": "256", "type": "USER", }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "root_volume_configuration.0.user_and_group_quotas.*", map[string]string{ + "id": "0", + "storage_capacity_quota_gib": "0", + "type": "USER", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "root_volume_configuration.0.user_and_group_quotas.*", map[string]string{ + "id": "0", + "storage_capacity_quota_gib": "0", + "type": "GROUP", + }), ), }, { Config: testAccOpenZFSFileSystemConfig_rootVolume3Client(rName, "NONE", "false", 128, 1024, 512), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsFileSystemExists(ctx, resourceName, &filesystem3), - testAccCheckOpenzfsFileSystemNotRecreated(&filesystem1, &filesystem3), + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem3), + testAccCheckOpenZFSFileSystemNotRecreated(&filesystem1, &filesystem3), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.data_compression_type", "NONE"), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.#", "1"), @@ -236,7 +263,7 @@ func TestAccFSxOpenzfsFileSystem_rootVolume(t *testing.T) { }), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.read_only", "false"), resource.TestCheckResourceAttr(resourceName, 
"root_volume_configuration.0.record_size_kib", "512"), - resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.#", "4"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.#", "6"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "root_volume_configuration.0.user_and_group_quotas.*", map[string]string{ "id": "10", "storage_capacity_quota_gib": "128", @@ -257,18 +284,28 @@ func TestAccFSxOpenzfsFileSystem_rootVolume(t *testing.T) { "storage_capacity_quota_gib": "128", "type": "USER", }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "root_volume_configuration.0.user_and_group_quotas.*", map[string]string{ + "id": "0", + "storage_capacity_quota_gib": "0", + "type": "USER", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "root_volume_configuration.0.user_and_group_quotas.*", map[string]string{ + "id": "0", + "storage_capacity_quota_gib": "0", + "type": "GROUP", + }), ), }, { Config: testAccOpenZFSFileSystemConfig_rootVolume4(rName, "NONE", "false", 128, 1024), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsFileSystemExists(ctx, resourceName, &filesystem1), + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem1), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.data_compression_type", "NONE"), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.#", "0"), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.read_only", "false"), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.record_size_kib", "128"), - resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.#", "4"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.#", "6"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "root_volume_configuration.0.user_and_group_quotas.*", map[string]string{ "id": "10", "storage_capacity_quota_gib": "128", @@ -289,13 +326,23 @@ func TestAccFSxOpenzfsFileSystem_rootVolume(t *testing.T) { "storage_capacity_quota_gib": "128", "type": "USER", }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "root_volume_configuration.0.user_and_group_quotas.*", map[string]string{ + "id": "0", + "storage_capacity_quota_gib": "0", + "type": "USER", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "root_volume_configuration.0.user_and_group_quotas.*", map[string]string{ + "id": "0", + "storage_capacity_quota_gib": "0", + "type": "GROUP", + }), ), }, }, }) } -func TestAccFSxOpenzfsFileSystem_securityGroupIDs(t *testing.T) { +func TestAccFSxOpenZFSFileSystem_securityGroupIDs(t *testing.T) { ctx := acctest.Context(t) var filesystem1, filesystem2 fsx.FileSystem resourceName := "aws_fsx_openzfs_file_system.test" @@ -305,12 +352,12 @@ func TestAccFSxOpenzfsFileSystem_securityGroupIDs(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsFileSystemDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSFileSystemDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSFileSystemConfig_securityGroupIDs1(rName), Check: resource.ComposeTestCheckFunc( - 
testAccCheckOpenzfsFileSystemExists(ctx, resourceName, &filesystem1), + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem1), resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", "1"), ), }, @@ -323,8 +370,8 @@ func TestAccFSxOpenzfsFileSystem_securityGroupIDs(t *testing.T) { { Config: testAccOpenZFSFileSystemConfig_securityGroupIDs2(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsFileSystemExists(ctx, resourceName, &filesystem2), - testAccCheckOpenzfsFileSystemRecreated(&filesystem1, &filesystem2), + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem2), + testAccCheckOpenZFSFileSystemRecreated(&filesystem1, &filesystem2), resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", "2"), ), }, @@ -332,7 +379,7 @@ func TestAccFSxOpenzfsFileSystem_securityGroupIDs(t *testing.T) { }) } -func TestAccFSxOpenzfsFileSystem_tags(t *testing.T) { +func TestAccFSxOpenZFSFileSystem_tags(t *testing.T) { ctx := acctest.Context(t) var filesystem1, filesystem2, filesystem3 fsx.FileSystem resourceName := "aws_fsx_openzfs_file_system.test" @@ -342,12 +389,12 @@ func TestAccFSxOpenzfsFileSystem_tags(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsFileSystemDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSFileSystemDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSFileSystemConfig_tags1(rName, "key1", "value1"), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsFileSystemExists(ctx, resourceName, &filesystem1), + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem1), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), ), @@ -361,8 +408,8 @@ func TestAccFSxOpenzfsFileSystem_tags(t *testing.T) { { Config: testAccOpenZFSFileSystemConfig_tags2(rName, "key1", "value1updated", "key2", "value2"), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsFileSystemExists(ctx, resourceName, &filesystem2), - testAccCheckOpenzfsFileSystemNotRecreated(&filesystem1, &filesystem2), + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem2), + testAccCheckOpenZFSFileSystemNotRecreated(&filesystem1, &filesystem2), resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), @@ -371,8 +418,8 @@ func TestAccFSxOpenzfsFileSystem_tags(t *testing.T) { { Config: testAccOpenZFSFileSystemConfig_tags1(rName, "key2", "value2"), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsFileSystemExists(ctx, resourceName, &filesystem3), - testAccCheckOpenzfsFileSystemNotRecreated(&filesystem2, &filesystem3), + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem3), + testAccCheckOpenZFSFileSystemNotRecreated(&filesystem2, &filesystem3), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), ), @@ -381,7 +428,7 @@ func TestAccFSxOpenzfsFileSystem_tags(t *testing.T) { }) } -func TestAccFSxOpenzfsFileSystem_copyTags(t *testing.T) { +func TestAccFSxOpenZFSFileSystem_copyTags(t *testing.T) { ctx := acctest.Context(t) var filesystem1, filesystem2 fsx.FileSystem resourceName := 
"aws_fsx_openzfs_file_system.test" @@ -391,12 +438,12 @@ func TestAccFSxOpenzfsFileSystem_copyTags(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsFileSystemDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSFileSystemDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSFileSystemConfig_copyTags(rName, "key1", "value1", "true"), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsFileSystemExists(ctx, resourceName, &filesystem1), + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem1), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), resource.TestCheckResourceAttr(resourceName, "copy_tags_to_backups", "true"), @@ -412,8 +459,8 @@ func TestAccFSxOpenzfsFileSystem_copyTags(t *testing.T) { { Config: testAccOpenZFSFileSystemConfig_copyTags(rName, "key1", "value1", "false"), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsFileSystemExists(ctx, resourceName, &filesystem2), - testAccCheckOpenzfsFileSystemNotRecreated(&filesystem1, &filesystem2), + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem2), + testAccCheckOpenZFSFileSystemNotRecreated(&filesystem1, &filesystem2), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), resource.TestCheckResourceAttr(resourceName, "copy_tags_to_backups", "false"), resource.TestCheckResourceAttr(resourceName, "copy_tags_to_volumes", "false"), @@ -423,7 +470,7 @@ func TestAccFSxOpenzfsFileSystem_copyTags(t *testing.T) { }) } -func TestAccFSxOpenzfsFileSystem_throughput(t *testing.T) { +func TestAccFSxOpenZFSFileSystem_throughput(t *testing.T) { ctx := acctest.Context(t) var filesystem1, filesystem2 fsx.FileSystem resourceName := "aws_fsx_openzfs_file_system.test" @@ -433,12 +480,12 @@ func TestAccFSxOpenzfsFileSystem_throughput(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsFileSystemDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSFileSystemDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSFileSystemConfig_throughput(rName, 64), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsFileSystemExists(ctx, resourceName, &filesystem1), + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem1), resource.TestCheckResourceAttr(resourceName, "throughput_capacity", "64"), ), }, @@ -451,8 +498,8 @@ func TestAccFSxOpenzfsFileSystem_throughput(t *testing.T) { { Config: testAccOpenZFSFileSystemConfig_throughput(rName, 128), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsFileSystemExists(ctx, resourceName, &filesystem2), - testAccCheckOpenzfsFileSystemNotRecreated(&filesystem1, &filesystem2), + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem2), + testAccCheckOpenZFSFileSystemNotRecreated(&filesystem1, &filesystem2), resource.TestCheckResourceAttr(resourceName, "throughput_capacity", "128"), ), }, @@ -460,7 +507,7 @@ func TestAccFSxOpenzfsFileSystem_throughput(t *testing.T) { }) } -func TestAccFSxOpenzfsFileSystem_storageType(t *testing.T) { +func TestAccFSxOpenZFSFileSystem_storageType(t *testing.T) { ctx := 
acctest.Context(t) var filesystem1 fsx.FileSystem resourceName := "aws_fsx_openzfs_file_system.test" @@ -470,12 +517,12 @@ func TestAccFSxOpenzfsFileSystem_storageType(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsFileSystemDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSFileSystemDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSFileSystemConfig_storageType(rName, "SSD"), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsFileSystemExists(ctx, resourceName, &filesystem1), + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem1), resource.TestCheckResourceAttr(resourceName, "storage_type", "SSD"), ), }, @@ -489,7 +536,7 @@ func TestAccFSxOpenzfsFileSystem_storageType(t *testing.T) { }) } -func TestAccFSxOpenzfsFileSystem_weeklyMaintenanceStartTime(t *testing.T) { +func TestAccFSxOpenZFSFileSystem_weeklyMaintenanceStartTime(t *testing.T) { ctx := acctest.Context(t) var filesystem1, filesystem2 fsx.FileSystem resourceName := "aws_fsx_openzfs_file_system.test" @@ -499,12 +546,12 @@ func TestAccFSxOpenzfsFileSystem_weeklyMaintenanceStartTime(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsFileSystemDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSFileSystemDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSFileSystemConfig_weeklyMaintenanceStartTime(rName, "1:01:01"), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsFileSystemExists(ctx, resourceName, &filesystem1), + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem1), resource.TestCheckResourceAttr(resourceName, "weekly_maintenance_start_time", "1:01:01"), ), }, @@ -517,8 +564,8 @@ func TestAccFSxOpenzfsFileSystem_weeklyMaintenanceStartTime(t *testing.T) { { Config: testAccOpenZFSFileSystemConfig_weeklyMaintenanceStartTime(rName, "2:02:02"), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsFileSystemExists(ctx, resourceName, &filesystem2), - testAccCheckOpenzfsFileSystemNotRecreated(&filesystem1, &filesystem2), + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem2), + testAccCheckOpenZFSFileSystemNotRecreated(&filesystem1, &filesystem2), resource.TestCheckResourceAttr(resourceName, "weekly_maintenance_start_time", "2:02:02"), ), }, @@ -526,7 +573,7 @@ func TestAccFSxOpenzfsFileSystem_weeklyMaintenanceStartTime(t *testing.T) { }) } -func TestAccFSxOpenzfsFileSystem_automaticBackupRetentionDays(t *testing.T) { +func TestAccFSxOpenZFSFileSystem_automaticBackupRetentionDays(t *testing.T) { ctx := acctest.Context(t) var filesystem1, filesystem2 fsx.FileSystem resourceName := "aws_fsx_openzfs_file_system.test" @@ -536,12 +583,12 @@ func TestAccFSxOpenzfsFileSystem_automaticBackupRetentionDays(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsFileSystemDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSFileSystemDestroy(ctx), Steps: []resource.TestStep{ { Config: 
testAccOpenZFSFileSystemConfig_automaticBackupRetentionDays(rName, 90), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsFileSystemExists(ctx, resourceName, &filesystem1), + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem1), resource.TestCheckResourceAttr(resourceName, "automatic_backup_retention_days", "90"), ), }, @@ -554,15 +601,15 @@ func TestAccFSxOpenzfsFileSystem_automaticBackupRetentionDays(t *testing.T) { { Config: testAccOpenZFSFileSystemConfig_automaticBackupRetentionDays(rName, 0), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsFileSystemExists(ctx, resourceName, &filesystem2), - testAccCheckOpenzfsFileSystemNotRecreated(&filesystem1, &filesystem2), + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem2), + testAccCheckOpenZFSFileSystemNotRecreated(&filesystem1, &filesystem2), resource.TestCheckResourceAttr(resourceName, "automatic_backup_retention_days", "0"), ), }, { Config: testAccOpenZFSFileSystemConfig_automaticBackupRetentionDays(rName, 1), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsFileSystemExists(ctx, resourceName, &filesystem1), + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem1), resource.TestCheckResourceAttr(resourceName, "automatic_backup_retention_days", "1"), ), }, @@ -570,7 +617,7 @@ func TestAccFSxOpenzfsFileSystem_automaticBackupRetentionDays(t *testing.T) { }) } -func TestAccFSxOpenzfsFileSystem_kmsKeyID(t *testing.T) { +func TestAccFSxOpenZFSFileSystem_kmsKeyID(t *testing.T) { ctx := acctest.Context(t) var filesystem fsx.FileSystem resourceName := "aws_fsx_openzfs_file_system.test" @@ -580,12 +627,12 @@ func TestAccFSxOpenzfsFileSystem_kmsKeyID(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsFileSystemDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSFileSystemDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSFileSystemConfig_kmsKeyID(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsFileSystemExists(ctx, resourceName, &filesystem), + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem), resource.TestCheckResourceAttrPair(resourceName, "kms_key_id", "aws_kms_key.test", "arn"), ), }, @@ -599,7 +646,7 @@ func TestAccFSxOpenzfsFileSystem_kmsKeyID(t *testing.T) { }) } -func TestAccFSxOpenzfsFileSystem_dailyAutomaticBackupStartTime(t *testing.T) { +func TestAccFSxOpenZFSFileSystem_dailyAutomaticBackupStartTime(t *testing.T) { ctx := acctest.Context(t) var filesystem1, filesystem2 fsx.FileSystem resourceName := "aws_fsx_openzfs_file_system.test" @@ -609,12 +656,12 @@ func TestAccFSxOpenzfsFileSystem_dailyAutomaticBackupStartTime(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsFileSystemDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSFileSystemDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSFileSystemConfig_dailyAutomaticBackupStartTime(rName, "01:01"), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsFileSystemExists(ctx, resourceName, &filesystem1), + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem1), resource.TestCheckResourceAttr(resourceName, 
"daily_automatic_backup_start_time", "01:01"), ), }, @@ -627,8 +674,8 @@ func TestAccFSxOpenzfsFileSystem_dailyAutomaticBackupStartTime(t *testing.T) { { Config: testAccOpenZFSFileSystemConfig_dailyAutomaticBackupStartTime(rName, "02:02"), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsFileSystemExists(ctx, resourceName, &filesystem2), - testAccCheckOpenzfsFileSystemNotRecreated(&filesystem1, &filesystem2), + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem2), + testAccCheckOpenZFSFileSystemNotRecreated(&filesystem1, &filesystem2), resource.TestCheckResourceAttr(resourceName, "daily_automatic_backup_start_time", "02:02"), ), }, @@ -636,7 +683,7 @@ func TestAccFSxOpenzfsFileSystem_dailyAutomaticBackupStartTime(t *testing.T) { }) } -func TestAccFSxOpenzfsFileSystem_throughputCapacity(t *testing.T) { +func TestAccFSxOpenZFSFileSystem_throughputCapacity(t *testing.T) { ctx := acctest.Context(t) var filesystem1, filesystem2 fsx.FileSystem resourceName := "aws_fsx_openzfs_file_system.test" @@ -646,12 +693,12 @@ func TestAccFSxOpenzfsFileSystem_throughputCapacity(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsFileSystemDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSFileSystemDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSFileSystemConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsFileSystemExists(ctx, resourceName, &filesystem1), + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem1), resource.TestCheckResourceAttr(resourceName, "throughput_capacity", "64"), ), }, @@ -662,10 +709,10 @@ func TestAccFSxOpenzfsFileSystem_throughputCapacity(t *testing.T) { ImportStateVerifyIgnore: []string{"security_group_ids"}, }, { - Config: testAccOpenzfsFileSystemConfig_throughputCapacity(rName), + Config: testAccOpenZFSFileSystemConfig_throughputCapacity(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsFileSystemExists(ctx, resourceName, &filesystem2), - testAccCheckOpenzfsFileSystemNotRecreated(&filesystem1, &filesystem2), + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem2), + testAccCheckOpenZFSFileSystemNotRecreated(&filesystem1, &filesystem2), resource.TestCheckResourceAttr(resourceName, "throughput_capacity", "128"), ), }, @@ -673,7 +720,7 @@ func TestAccFSxOpenzfsFileSystem_throughputCapacity(t *testing.T) { }) } -func TestAccFSxOpenzfsFileSystem_storageCapacity(t *testing.T) { +func TestAccFSxOpenZFSFileSystem_storageCapacity(t *testing.T) { ctx := acctest.Context(t) var filesystem1, filesystem2 fsx.FileSystem resourceName := "aws_fsx_openzfs_file_system.test" @@ -683,12 +730,12 @@ func TestAccFSxOpenzfsFileSystem_storageCapacity(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsFileSystemDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSFileSystemDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccOpenZFSFileSystemConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsFileSystemExists(ctx, resourceName, &filesystem1), + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem1), 
resource.TestCheckResourceAttr(resourceName, "storage_capacity", "64"), ), }, @@ -699,10 +746,10 @@ func TestAccFSxOpenzfsFileSystem_storageCapacity(t *testing.T) { ImportStateVerifyIgnore: []string{"security_group_ids"}, }, { - Config: testAccOpenzfsFileSystemConfig_storageCapacity(rName), + Config: testAccOpenZFSFileSystemConfig_storageCapacity(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsFileSystemExists(ctx, resourceName, &filesystem2), - testAccCheckOpenzfsFileSystemNotRecreated(&filesystem1, &filesystem2), + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem2), + testAccCheckOpenZFSFileSystemNotRecreated(&filesystem1, &filesystem2), resource.TestCheckResourceAttr(resourceName, "storage_capacity", "75"), ), }, @@ -710,7 +757,7 @@ func TestAccFSxOpenzfsFileSystem_storageCapacity(t *testing.T) { }) } -func TestAccFSxOpenzfsFileSystem_deploymentType(t *testing.T) { +func TestAccFSxOpenZFSFileSystem_deploymentType(t *testing.T) { ctx := acctest.Context(t) var filesystem1, filesystem2 fsx.FileSystem resourceName := "aws_fsx_openzfs_file_system.test" @@ -720,12 +767,12 @@ func TestAccFSxOpenzfsFileSystem_deploymentType(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOpenzfsFileSystemDestroy(ctx), + CheckDestroy: testAccCheckOpenZFSFileSystemDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccOpenzfsFileSystemConfig_deploymentType(rName, "SINGLE_AZ_1", 64), + Config: testAccOpenZFSFileSystemConfig_deploymentType(rName, "SINGLE_AZ_1", 64), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsFileSystemExists(ctx, resourceName, &filesystem1), + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem1), resource.TestCheckResourceAttr(resourceName, "deployment_type", "SINGLE_AZ_1"), resource.TestCheckResourceAttr(resourceName, "throughput_capacity", "64"), ), @@ -737,10 +784,10 @@ func TestAccFSxOpenzfsFileSystem_deploymentType(t *testing.T) { ImportStateVerifyIgnore: []string{"security_group_ids"}, }, { - Config: testAccOpenzfsFileSystemConfig_deploymentType(rName, "SINGLE_AZ_2", 160), + Config: testAccOpenZFSFileSystemConfig_deploymentType(rName, "SINGLE_AZ_2", 160), Check: resource.ComposeTestCheckFunc( - testAccCheckOpenzfsFileSystemExists(ctx, resourceName, &filesystem2), - testAccCheckOpenzfsFileSystemRecreated(&filesystem1, &filesystem2), + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem2), + testAccCheckOpenZFSFileSystemRecreated(&filesystem1, &filesystem2), resource.TestCheckResourceAttr(resourceName, "deployment_type", "SINGLE_AZ_2"), resource.TestCheckResourceAttr(resourceName, "throughput_capacity", "160"), ), @@ -749,78 +796,196 @@ func TestAccFSxOpenzfsFileSystem_deploymentType(t *testing.T) { }) } -func testAccCheckOpenzfsFileSystemExists(ctx context.Context, resourceName string, fs *fsx.FileSystem) resource.TestCheckFunc { +func TestAccFSxOpenZFSFileSystem_multiAZ(t *testing.T) { + ctx := acctest.Context(t) + var filesystem fsx.FileSystem + resourceName := "aws_fsx_openzfs_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckOpenZFSFileSystemDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccOpenZFSFileSystemConfig_multiAZ(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "fsx", regexache.MustCompile(`file-system/fs-.+`)), + resource.TestCheckResourceAttr(resourceName, "automatic_backup_retention_days", "0"), + resource.TestCheckNoResourceAttr(resourceName, "backup_id"), + resource.TestCheckResourceAttr(resourceName, "copy_tags_to_backups", "false"), + resource.TestCheckResourceAttr(resourceName, "copy_tags_to_volumes", "false"), + resource.TestCheckResourceAttr(resourceName, "daily_automatic_backup_start_time", ""), + resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.OpenZFSDeploymentTypeMultiAz1), + resource.TestCheckResourceAttr(resourceName, "disk_iops_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "disk_iops_configuration.0.iops", "192"), + resource.TestCheckResourceAttr(resourceName, "disk_iops_configuration.0.mode", "AUTOMATIC"), + resource.TestCheckResourceAttrSet(resourceName, "dns_name"), + resource.TestCheckResourceAttrSet(resourceName, "endpoint_ip_address_range"), + resource.TestCheckResourceAttrSet(resourceName, "kms_key_id"), + resource.TestCheckResourceAttr(resourceName, "network_interface_ids.#", "2"), + acctest.CheckResourceAttrAccountID(resourceName, "owner_id"), + resource.TestCheckResourceAttrPair(resourceName, "preferred_subnet_id", "aws_subnet.test.0", "id"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.data_compression_type", "NONE"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.#", "1"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.0.client_configurations.#", "1"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.0.client_configurations.0.clients", "*"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.0.client_configurations.0.options.#", "2"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.0.client_configurations.0.options.0", "rw"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.0.client_configurations.0.options.1", "crossmnt"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.read_only", "false"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.record_size_kib", "128"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.#", "2"), + resource.TestCheckResourceAttrSet(resourceName, "root_volume_id"), + resource.TestCheckResourceAttr(resourceName, "route_table_ids.#", "1"), + resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", "0"), + resource.TestCheckResourceAttr(resourceName, "storage_capacity", "64"), + resource.TestCheckResourceAttr(resourceName, "storage_type", fsx.StorageTypeSsd), + resource.TestCheckResourceAttr(resourceName, "subnet_ids.#", "2"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "subnet_ids.*", "aws_subnet.test.0", "id"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "subnet_ids.*", "aws_subnet.test.1", "id"), + 
resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), + resource.TestCheckResourceAttr(resourceName, "throughput_capacity", "160"), + resource.TestCheckResourceAttrPair(resourceName, "vpc_id", "aws_vpc.test", "id"), + resource.TestMatchResourceAttr(resourceName, "weekly_maintenance_start_time", regexache.MustCompile(`^\d:\d\d:\d\d$`)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccFSxOpenZFSFileSystem_routeTableIDs(t *testing.T) { + ctx := acctest.Context(t) + var filesystem fsx.FileSystem + resourceName := "aws_fsx_openzfs_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckOpenZFSFileSystemDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccOpenZFSFileSystemConfig_routeTableIDs(rName, 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem), + resource.TestCheckResourceAttr(resourceName, "route_table_ids.#", "1"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "route_table_ids.*", "aws_route_table.test.0", "id"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccOpenZFSFileSystemConfig_routeTableIDs(rName, 2), + Check: resource.ComposeTestCheckFunc( + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem), + resource.TestCheckResourceAttr(resourceName, "route_table_ids.#", "2"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "route_table_ids.*", "aws_route_table.test.0", "id"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "route_table_ids.*", "aws_route_table.test.1", "id"), + ), + }, + { + Config: testAccOpenZFSFileSystemConfig_routeTableIDs(rName, 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckOpenZFSFileSystemExists(ctx, resourceName, &filesystem), + resource.TestCheckResourceAttr(resourceName, "route_table_ids.#", "1"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "route_table_ids.*", "aws_route_table.test.0", "id"), + ), + }, + }, + }) +} + +func testAccCheckOpenZFSFileSystemExists(ctx context.Context, n string, v *fsx.FileSystem) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceName] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Not found: %s", resourceName) + return fmt.Errorf("Not found: %s", n) } conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx) - filesystem, err := tffsx.FindFileSystemByID(ctx, conn, rs.Primary.ID) + output, err := tffsx.FindOpenZFSFileSystemByID(ctx, conn, rs.Primary.ID) + if err != nil { return err } - if filesystem == nil { - return fmt.Errorf("FSx Openzfs File System (%s) not found", rs.Primary.ID) - } - - *fs = *filesystem + *v = *output return nil } } -func testAccCheckOpenzfsFileSystemNotRecreated(i, j *fsx.FileSystem) resource.TestCheckFunc { +func testAccCheckOpenZFSFileSystemDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.StringValue(i.FileSystemId) != aws.StringValue(j.FileSystemId) { - return fmt.Errorf("FSx OpenZFS File System (%s) 
recreated", aws.StringValue(i.FileSystemId)) + conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_fsx_openzfs_file_system" { + continue + } + + _, err := tffsx.FindOpenZFSFileSystemByID(ctx, conn, rs.Primary.ID) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("FSx for OpenZFS File System %s still exists", rs.Primary.ID) } return nil } } -func testAccCheckOpenzfsFileSystemRecreated(i, j *fsx.FileSystem) resource.TestCheckFunc { +func testAccCheckOpenZFSFileSystemNotRecreated(i, j *fsx.FileSystem) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.StringValue(i.FileSystemId) == aws.StringValue(j.FileSystemId) { - return fmt.Errorf("FSx OpenZFS File System (%s) not recreated", aws.StringValue(i.FileSystemId)) + if aws.StringValue(i.FileSystemId) != aws.StringValue(j.FileSystemId) { + return fmt.Errorf("FSx OpenZFS File System (%s) recreated", aws.StringValue(i.FileSystemId)) } return nil } } -func testAccCheckOpenzfsFileSystemDestroy(ctx context.Context) resource.TestCheckFunc { +func testAccCheckOpenZFSFileSystemRecreated(i, j *fsx.FileSystem) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_fsx_openzfs_file_system" { - continue - } - - filesystem, err := tffsx.FindFileSystemByID(ctx, conn, rs.Primary.ID) - if tfresource.NotFound(err) { - continue - } - - if filesystem != nil { - return fmt.Errorf("FSx OpenZFS File System (%s) still exists", rs.Primary.ID) - } + if aws.StringValue(i.FileSystemId) == aws.StringValue(j.FileSystemId) { + return fmt.Errorf("FSx OpenZFS File System (%s) not recreated", aws.StringValue(i.FileSystemId)) } + return nil } } -func testAccOpenzfsFileSystemConfig_base(rName string) string { +func testAccOpenZFSFileSystemConfig_baseSingleAZ(rName string) string { return acctest.ConfigVPCWithSubnets(rName, 1) } +func testAccOpenZFSFileSystemConfig_baseMultiAZ(rName string) string { + return acctest.ConfigVPCWithSubnets(rName, 2) +} + func testAccOpenZFSFileSystemConfig_basic(rName string) string { - return acctest.ConfigCompose(testAccOpenzfsFileSystemConfig_base(rName), ` + return acctest.ConfigCompose(testAccOpenZFSFileSystemConfig_baseSingleAZ(rName), ` resource "aws_fsx_openzfs_file_system" "test" { storage_capacity = 64 subnet_ids = aws_subnet.test[*].id @@ -831,7 +996,7 @@ resource "aws_fsx_openzfs_file_system" "test" { } func testAccOpenZFSFileSystemConfig_diskIOPSConfiguration(rName string, iops int) string { - return acctest.ConfigCompose(testAccOpenzfsFileSystemConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSFileSystemConfig_baseSingleAZ(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_file_system" "test" { storage_capacity = 64 storage_type = "SSD" @@ -851,10 +1016,10 @@ resource "aws_fsx_openzfs_file_system" "test" { } func testAccOpenZFSFileSystemConfig_securityGroupIDs1(rName string) string { - return acctest.ConfigCompose(testAccOpenzfsFileSystemConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSFileSystemConfig_baseSingleAZ(rName), fmt.Sprintf(` resource "aws_security_group" "test1" { - description = "security group for FSx testing" - vpc_id = aws_vpc.test.id + name = "%[1]s-1" + vpc_id = aws_vpc.test.id ingress { cidr_blocks = [aws_vpc.test.cidr_block] @@ -891,10 +1056,10 @@ resource 
"aws_fsx_openzfs_file_system" "test" { } func testAccOpenZFSFileSystemConfig_securityGroupIDs2(rName string) string { - return acctest.ConfigCompose(testAccOpenzfsFileSystemConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSFileSystemConfig_baseSingleAZ(rName), fmt.Sprintf(` resource "aws_security_group" "test1" { - description = "security group for FSx testing" - vpc_id = aws_vpc.test.id + name = "%[1]s-1" + vpc_id = aws_vpc.test.id ingress { cidr_blocks = [aws_vpc.test.cidr_block] @@ -916,8 +1081,8 @@ resource "aws_security_group" "test1" { } resource "aws_security_group" "test2" { - description = "security group for FSx testing" - vpc_id = aws_vpc.test.id + name = "%[1]s-2" + vpc_id = aws_vpc.test.id ingress { cidr_blocks = [aws_vpc.test.cidr_block] @@ -954,7 +1119,7 @@ resource "aws_fsx_openzfs_file_system" "test" { } func testAccOpenZFSFileSystemConfig_tags1(rName, tagKey1, tagValue1 string) string { - return acctest.ConfigCompose(testAccOpenzfsFileSystemConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSFileSystemConfig_baseSingleAZ(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_file_system" "test" { storage_capacity = 64 subnet_ids = aws_subnet.test[*].id @@ -970,7 +1135,7 @@ resource "aws_fsx_openzfs_file_system" "test" { } func testAccOpenZFSFileSystemConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return acctest.ConfigCompose(testAccOpenzfsFileSystemConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSFileSystemConfig_baseSingleAZ(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_file_system" "test" { storage_capacity = 64 subnet_ids = aws_subnet.test[*].id @@ -987,7 +1152,7 @@ resource "aws_fsx_openzfs_file_system" "test" { } func testAccOpenZFSFileSystemConfig_copyTags(rName, tagKey1, tagValue1, copyTags string) string { - return acctest.ConfigCompose(testAccOpenzfsFileSystemConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSFileSystemConfig_baseSingleAZ(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_file_system" "test" { storage_capacity = 64 subnet_ids = aws_subnet.test[*].id @@ -1005,7 +1170,7 @@ resource "aws_fsx_openzfs_file_system" "test" { } func testAccOpenZFSFileSystemConfig_weeklyMaintenanceStartTime(rName, weeklyMaintenanceStartTime string) string { - return acctest.ConfigCompose(testAccOpenzfsFileSystemConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSFileSystemConfig_baseSingleAZ(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_file_system" "test" { storage_capacity = 64 subnet_ids = aws_subnet.test[*].id @@ -1022,7 +1187,7 @@ resource "aws_fsx_openzfs_file_system" "test" { } func testAccOpenZFSFileSystemConfig_dailyAutomaticBackupStartTime(rName, dailyAutomaticBackupStartTime string) string { - return acctest.ConfigCompose(testAccOpenzfsFileSystemConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSFileSystemConfig_baseSingleAZ(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_file_system" "test" { storage_capacity = 64 subnet_ids = aws_subnet.test[*].id @@ -1040,7 +1205,7 @@ resource "aws_fsx_openzfs_file_system" "test" { } func testAccOpenZFSFileSystemConfig_automaticBackupRetentionDays(rName string, retention int) string { - return acctest.ConfigCompose(testAccOpenzfsFileSystemConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSFileSystemConfig_baseSingleAZ(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_file_system" "test" { storage_capacity = 
64 subnet_ids = aws_subnet.test[*].id @@ -1057,7 +1222,7 @@ resource "aws_fsx_openzfs_file_system" "test" { } func testAccOpenZFSFileSystemConfig_kmsKeyID(rName string) string { - return acctest.ConfigCompose(testAccOpenzfsFileSystemConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSFileSystemConfig_baseSingleAZ(rName), fmt.Sprintf(` resource "aws_kms_key" "test" { description = %[1]q deletion_window_in_days = 7 @@ -1079,7 +1244,7 @@ resource "aws_fsx_openzfs_file_system" "test" { } func testAccOpenZFSFileSystemConfig_throughput(rName string, throughput int) string { - return acctest.ConfigCompose(testAccOpenzfsFileSystemConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSFileSystemConfig_baseSingleAZ(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_file_system" "test" { storage_capacity = 64 subnet_ids = aws_subnet.test[*].id @@ -1094,7 +1259,7 @@ resource "aws_fsx_openzfs_file_system" "test" { } func testAccOpenZFSFileSystemConfig_storageType(rName, storageType string) string { - return acctest.ConfigCompose(testAccOpenzfsFileSystemConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSFileSystemConfig_baseSingleAZ(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_file_system" "test" { storage_capacity = 64 subnet_ids = aws_subnet.test[*].id @@ -1110,27 +1275,41 @@ resource "aws_fsx_openzfs_file_system" "test" { } func testAccOpenZFSFileSystemConfig_rootVolume1(rName, dataCompression, readOnly string, quotaSize int) string { - return acctest.ConfigCompose(testAccOpenzfsFileSystemConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSFileSystemConfig_baseSingleAZ(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_file_system" "test" { storage_capacity = 64 subnet_ids = aws_subnet.test[*].id deployment_type = "SINGLE_AZ_1" throughput_capacity = 64 + root_volume_configuration { copy_tags_to_snapshots = true data_compression_type = %[2]q + nfs_exports { client_configurations { clients = "10.0.1.0/24" options = ["sync", "rw"] } } + read_only = %[3]s + user_and_group_quotas { id = 10 storage_capacity_quota_gib = %[4]d type = "USER" } + user_and_group_quotas { + id = 0 + storage_capacity_quota_gib = 0 + type = "USER" + } + user_and_group_quotas { + id = 0 + storage_capacity_quota_gib = 0 + type = "GROUP" + } } tags = { @@ -1141,7 +1320,7 @@ resource "aws_fsx_openzfs_file_system" "test" { } func testAccOpenZFSFileSystemConfig_rootVolume2(rName, dataCompression, readOnly string, quotaSize, recordSizeKiB int) string { - return acctest.ConfigCompose(testAccOpenzfsFileSystemConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSFileSystemConfig_baseSingleAZ(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_file_system" "test" { storage_capacity = 64 subnet_ids = aws_subnet.test[*].id @@ -1151,6 +1330,7 @@ resource "aws_fsx_openzfs_file_system" "test" { root_volume_configuration { copy_tags_to_snapshots = true data_compression_type = %[2]q + nfs_exports { client_configurations { clients = "10.0.1.0/24" @@ -1166,6 +1346,16 @@ resource "aws_fsx_openzfs_file_system" "test" { storage_capacity_quota_gib = %[4]d type = "USER" } + user_and_group_quotas { + id = 0 + storage_capacity_quota_gib = 0 + type = "USER" + } + user_and_group_quotas { + id = 0 + storage_capacity_quota_gib = 0 + type = "GROUP" + } } tags = { @@ -1176,7 +1366,7 @@ resource "aws_fsx_openzfs_file_system" "test" { } func testAccOpenZFSFileSystemConfig_rootVolume3Client(rName, dataCompression, readOnly string, userQuota, 
groupQuota, recordSizeKiB int) string { - return acctest.ConfigCompose(testAccOpenzfsFileSystemConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSFileSystemConfig_baseSingleAZ(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_file_system" "test" { storage_capacity = 64 subnet_ids = aws_subnet.test[*].id @@ -1186,6 +1376,7 @@ resource "aws_fsx_openzfs_file_system" "test" { root_volume_configuration { copy_tags_to_snapshots = true data_compression_type = %[2]q + nfs_exports { client_configurations { clients = "10.0.1.0/24" @@ -1220,6 +1411,16 @@ resource "aws_fsx_openzfs_file_system" "test" { storage_capacity_quota_gib = %[4]d type = "USER" } + user_and_group_quotas { + id = 0 + storage_capacity_quota_gib = 0 + type = "USER" + } + user_and_group_quotas { + id = 0 + storage_capacity_quota_gib = 0 + type = "GROUP" + } } tags = { @@ -1230,12 +1431,13 @@ resource "aws_fsx_openzfs_file_system" "test" { } func testAccOpenZFSFileSystemConfig_rootVolume4(rName, dataCompression, readOnly string, userQuota, groupQuota int) string { - return acctest.ConfigCompose(testAccOpenzfsFileSystemConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccOpenZFSFileSystemConfig_baseSingleAZ(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_file_system" "test" { storage_capacity = 64 subnet_ids = aws_subnet.test[*].id deployment_type = "SINGLE_AZ_1" throughput_capacity = 64 + root_volume_configuration { copy_tags_to_snapshots = true data_compression_type = %[2]q @@ -1260,6 +1462,16 @@ resource "aws_fsx_openzfs_file_system" "test" { storage_capacity_quota_gib = %[4]d type = "USER" } + user_and_group_quotas { + id = 0 + storage_capacity_quota_gib = 0 + type = "USER" + } + user_and_group_quotas { + id = 0 + storage_capacity_quota_gib = 0 + type = "GROUP" + } } tags = { @@ -1269,8 +1481,8 @@ resource "aws_fsx_openzfs_file_system" "test" { `, rName, dataCompression, readOnly, userQuota, groupQuota)) } -func testAccOpenzfsFileSystemConfig_throughputCapacity(rName string) string { - return acctest.ConfigCompose(testAccOpenzfsFileSystemConfig_base(rName), fmt.Sprintf(` +func testAccOpenZFSFileSystemConfig_throughputCapacity(rName string) string { + return acctest.ConfigCompose(testAccOpenZFSFileSystemConfig_baseSingleAZ(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_file_system" "test" { storage_capacity = 64 subnet_ids = aws_subnet.test[*].id @@ -1284,8 +1496,8 @@ resource "aws_fsx_openzfs_file_system" "test" { `, rName)) } -func testAccOpenzfsFileSystemConfig_storageCapacity(rName string) string { - return acctest.ConfigCompose(testAccOpenzfsFileSystemConfig_base(rName), fmt.Sprintf(` +func testAccOpenZFSFileSystemConfig_storageCapacity(rName string) string { + return acctest.ConfigCompose(testAccOpenZFSFileSystemConfig_baseSingleAZ(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_file_system" "test" { storage_capacity = 75 subnet_ids = aws_subnet.test[*].id @@ -1299,8 +1511,8 @@ resource "aws_fsx_openzfs_file_system" "test" { `, rName)) } -func testAccOpenzfsFileSystemConfig_deploymentType(rName, deploymentType string, throughput int) string { - return acctest.ConfigCompose(testAccOpenzfsFileSystemConfig_base(rName), fmt.Sprintf(` +func testAccOpenZFSFileSystemConfig_deploymentType(rName, deploymentType string, throughput int) string { + return acctest.ConfigCompose(testAccOpenZFSFileSystemConfig_baseSingleAZ(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_file_system" "test" { storage_capacity = 64 subnet_ids = aws_subnet.test[*].id @@ -1313,3 +1525,59 @@ resource 
"aws_fsx_openzfs_file_system" "test" { } `, rName, deploymentType, throughput)) } + +func testAccOpenZFSFileSystemConfig_multiAZ(rName string) string { + return acctest.ConfigCompose(testAccOpenZFSFileSystemConfig_baseMultiAZ(rName), fmt.Sprintf(` +resource "aws_fsx_openzfs_file_system" "test" { + storage_capacity = 64 + subnet_ids = aws_subnet.test[*].id + preferred_subnet_id = aws_subnet.test[0].id + deployment_type = "MULTI_AZ_1" + throughput_capacity = 160 + + tags = { + Name = %[1]q + } +} +`, rName)) +} + +func testAccOpenZFSFileSystemConfig_routeTableIDs(rName string, n int) string { + return acctest.ConfigCompose(testAccOpenZFSFileSystemConfig_baseMultiAZ(rName), fmt.Sprintf(` +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id + + tags = { + Name = %[1]q + } +} + +resource "aws_route_table" "test" { + count = %[2]d + + vpc_id = aws_vpc.test.id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.test.id + } + + tags = { + Name = %[1]q + } +} + +resource "aws_fsx_openzfs_file_system" "test" { + storage_capacity = 64 + subnet_ids = aws_subnet.test[*].id + preferred_subnet_id = aws_subnet.test[0].id + deployment_type = "MULTI_AZ_1" + throughput_capacity = 160 + route_table_ids = aws_route_table.test[*].id + + tags = { + Name = %[1]q + } +} +`, rName, n)) +} diff --git a/internal/service/fsx/service_package_gen.go b/internal/service/fsx/service_package_gen.go index 38d735af0a8e..20bade3c119b 100644 --- a/internal/service/fsx/service_package_gen.go +++ b/internal/service/fsx/service_package_gen.go @@ -95,7 +95,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceOpenzfsFileSystem, + Factory: ResourceOpenZFSFileSystem, TypeName: "aws_fsx_openzfs_file_system", Name: "OpenZFS File System", Tags: &types.ServicePackageResourceTags{ diff --git a/internal/service/fsx/status.go b/internal/service/fsx/status.go index e7643eeef659..14368a4cf7bf 100644 --- a/internal/service/fsx/status.go +++ b/internal/service/fsx/status.go @@ -75,22 +75,6 @@ func statusFileSystem(ctx context.Context, conn *fsx.FSx, id string) retry.State } } -func statusDataRepositoryAssociation(ctx context.Context, conn *fsx.FSx, id string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindDataRepositoryAssociationByID(ctx, conn, id) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.Lifecycle), nil - } -} - func statusStorageVirtualMachine(ctx context.Context, conn *fsx.FSx, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := FindStorageVirtualMachineByID(ctx, conn, id) diff --git a/internal/service/fsx/sweep.go b/internal/service/fsx/sweep.go index 5d0b89df304e..3cda7b30059c 100644 --- a/internal/service/fsx/sweep.go +++ b/internal/service/fsx/sweep.go @@ -341,7 +341,7 @@ func sweepOpenZFSFileSystems(region string) error { continue } - r := ResourceOpenzfsFileSystem() + r := ResourceOpenZFSFileSystem() d := r.Data(nil) d.SetId(aws.StringValue(fs.FileSystemId)) diff --git a/internal/service/fsx/wait.go b/internal/service/fsx/wait.go index 818accded921..d92cc7b14756 100644 --- a/internal/service/fsx/wait.go +++ b/internal/service/fsx/wait.go @@ -203,72 +203,6 @@ func waitFileSystemDeleted(ctx context.Context, conn *fsx.FSx, id string, timeou return nil, err } -func waitDataRepositoryAssociationCreated(ctx context.Context, conn *fsx.FSx, id string, 
timeout time.Duration) (*fsx.DataRepositoryAssociation, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{fsx.DataRepositoryLifecycleCreating}, - Target: []string{fsx.DataRepositoryLifecycleAvailable}, - Refresh: statusDataRepositoryAssociation(ctx, conn, id), - Timeout: timeout, - Delay: 30 * time.Second, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*fsx.DataRepositoryAssociation); ok { - if status, details := aws.StringValue(output.Lifecycle), output.FailureDetails; status == fsx.DataRepositoryLifecycleFailed && details != nil { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.FailureDetails.Message))) - } - - return output, err - } - - return nil, err -} - -func waitDataRepositoryAssociationUpdated(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.DataRepositoryAssociation, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{fsx.DataRepositoryLifecycleUpdating}, - Target: []string{fsx.DataRepositoryLifecycleAvailable}, - Refresh: statusDataRepositoryAssociation(ctx, conn, id), - Timeout: timeout, - Delay: 30 * time.Second, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*fsx.DataRepositoryAssociation); ok { - if status, details := aws.StringValue(output.Lifecycle), output.FailureDetails; status == fsx.DataRepositoryLifecycleFailed && details != nil { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.FailureDetails.Message))) - } - - return output, err - } - - return nil, err -} - -func waitDataRepositoryAssociationDeleted(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.DataRepositoryAssociation, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{fsx.DataRepositoryLifecycleAvailable, fsx.DataRepositoryLifecycleDeleting}, - Target: []string{}, - Refresh: statusDataRepositoryAssociation(ctx, conn, id), - Timeout: timeout, - Delay: 30 * time.Second, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*fsx.DataRepositoryAssociation); ok { - if status, details := aws.StringValue(output.Lifecycle), output.FailureDetails; status == fsx.DataRepositoryLifecycleFailed && details != nil { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.FailureDetails.Message))) - } - - return output, err - } - - return nil, err -} - func waitStorageVirtualMachineCreated(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.StorageVirtualMachine, error) { stateConf := &retry.StateChangeConf{ Pending: []string{fsx.StorageVirtualMachineLifecycleCreating, fsx.StorageVirtualMachineLifecyclePending}, diff --git a/website/docs/r/fsx_openzfs_file_system.html.markdown b/website/docs/r/fsx_openzfs_file_system.html.markdown index d9e21bc8280c..2e2dfa8a662d 100644 --- a/website/docs/r/fsx_openzfs_file_system.html.markdown +++ b/website/docs/r/fsx_openzfs_file_system.html.markdown @@ -26,18 +26,21 @@ resource "aws_fsx_openzfs_file_system" "test" { This resource supports the following arguments: -* `deployment_type` - (Required) - The filesystem deployment type. Valid values: `SINGLE_AZ_1` and `SINGLE_AZ_2`. +* `deployment_type` - (Required) The filesystem deployment type. Valid values: `SINGLE_AZ_1`, `SINGLE_AZ_2` and `MULTI_AZ_1`. * `storage_capacity` - (Required) The storage capacity (GiB) of the file system. Valid values between `64` and `524288`.
-* `subnet_ids` - (Required) A list of IDs for the subnets that the file system will be accessible from. Exactly 1 subnet need to be provided. +* `subnet_ids` - (Required) A list of IDs for the subnets that the file system will be accessible from. * `throughput_capacity` - (Required) Throughput (MB/s) of the file system. Valid values depend on `deployment_type`. Must be one of `64`, `128`, `256`, `512`, `1024`, `2048`, `3072`, `4096` for `SINGLE_AZ_1`. Must be one of `160`, `320`, `640`, `1280`, `2560`, `3840`, `5120`, `7680`, `10240` for `SINGLE_AZ_2`. * `automatic_backup_retention_days` - (Optional) The number of days to retain automatic backups. Setting this to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days. * `backup_id` - (Optional) The ID of the source backup to create the filesystem from. * `copy_tags_to_backups` - (Optional) A boolean flag indicating whether tags for the file system should be copied to backups. The default value is false. * `copy_tags_to_volumes` - (Optional) A boolean flag indicating whether tags for the file system should be copied to snapshots. The default value is false. * `daily_automatic_backup_start_time` - (Optional) A recurring daily time, in the format HH:MM. HH is the zero-padded hour of the day (0-23), and MM is the zero-padded minute of the hour. For example, 05:00 specifies 5 AM daily. Requires `automatic_backup_retention_days` to be set. -* `disk_iops_configuration` - (Optional) The SSD IOPS configuration for the Amazon FSx for OpenZFS file system. See [Disk Iops Configuration](#disk-iops-configuration) Below. +* `disk_iops_configuration` - (Optional) The SSD IOPS configuration for the Amazon FSx for OpenZFS file system. See [Disk Iops Configuration](#disk-iops-configuration) below. +* `endpoint_ip_address_range` - (Optional) (Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system will be created. * `kms_key_id` - (Optional) ARN for the KMS Key to encrypt the file system at rest, Defaults to an AWS managed KMS Key. -* `root_volume_configuration` - (Optional) The configuration for the root volume of the file system. All other volumes are children or the root volume. See [Root Volume Configuration](#root-volume-configuration) Below. +* `preferred_subnet_id` - (Optional) (Multi-AZ only) Required when `deployment_type` is set to `MULTI_AZ_1`. This specifies the subnet in which you want the preferred file server to be located. +* `root_volume_configuration` - (Optional) The configuration for the root volume of the file system. All other volumes are children of the root volume. See [Root Volume Configuration](#root-volume-configuration) below. +* `route_table_ids` - (Optional) (Multi-AZ only) Specifies the route tables in which Amazon FSx creates the rules for routing traffic to the correct file server. You should specify all virtual private cloud (VPC) route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table. * `security_group_ids` - (Optional) A list of IDs for the security groups that apply to the specified network interfaces created for file system access. These security groups will apply to all network interfaces. * `storage_type` - (Optional) The filesystem storage type. Only `SSD` is supported. * `tags` - (Optional) A map of tags to assign to the file system.
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
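The Multi-AZ arguments documented above (`deployment_type = "MULTI_AZ_1"`, `preferred_subnet_id`, `route_table_ids`, and optionally `endpoint_ip_address_range`) combine as in the following minimal sketch, distilled from the `testAccOpenZFSFileSystemConfig_multiAZ` and `testAccOpenZFSFileSystemConfig_routeTableIDs` acceptance-test configurations in this diff. The `aws_subnet.example` and `aws_route_table.example` references are placeholder names assumed to be defined elsewhere in the configuration.

```terraform
# Minimal Multi-AZ sketch; the subnet and route table resources are
# assumed to exist elsewhere under these placeholder names.
resource "aws_fsx_openzfs_file_system" "example" {
  deployment_type     = "MULTI_AZ_1"
  storage_capacity    = 64  # GiB
  throughput_capacity = 160 # MB/s, the value used by the acceptance tests
  subnet_ids          = aws_subnet.example[*].id
  preferred_subnet_id = aws_subnet.example[0].id # required for MULTI_AZ_1
  route_table_ids     = aws_route_table.example[*].id
}
```

Omitting `route_table_ids` lets Amazon FSx fall back to the VPC's default route table, per the argument description above.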