Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

r/fsx_lustre_file_system - add data_compression_type #19664

Merged
merged 3 commits into from
Jun 4, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .changelog/19664.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
```release-note:enhancement
resource/aws_fsx_lustre_filesystem: Add `data_compression_type` argument.
```
71 changes: 40 additions & 31 deletions aws/resource_aws_fsx_lustre_file_system.go
Original file line number Diff line number Diff line change
Expand Up @@ -182,6 +182,12 @@ func resourceAwsFsxLustreFileSystem() *schema.Resource {
ForceNew: true,
Default: false,
},
"data_compression_type": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validation.StringInSlice(fsx.DataCompressionType_Values(), false),
Default: fsx.DataCompressionTypeNone,
},
},

CustomizeDiff: customdiff.Sequence(
Expand Down Expand Up @@ -274,6 +280,10 @@ func resourceAwsFsxLustreFileSystemCreate(d *schema.ResourceData, meta interface
input.LustreConfiguration.CopyTagsToBackups = aws.Bool(v.(bool))
}

if v, ok := d.GetOk("data_compression_type"); ok {
input.LustreConfiguration.DataCompressionType = aws.String(v.(string))
}

result, err := conn.CreateFileSystem(input)
if err != nil {
return fmt.Errorf("Error creating FSx Lustre filesystem: %w", err)
Expand Down Expand Up @@ -301,39 +311,37 @@ func resourceAwsFsxLustreFileSystemUpdate(d *schema.ResourceData, meta interface
}
}

requestUpdate := false
input := &fsx.UpdateFileSystemInput{
ClientRequestToken: aws.String(resource.UniqueId()),
FileSystemId: aws.String(d.Id()),
LustreConfiguration: &fsx.UpdateFileSystemLustreConfiguration{},
}
if d.HasChangesExcept("tags_all", "tags") {
input := &fsx.UpdateFileSystemInput{
ClientRequestToken: aws.String(resource.UniqueId()),
FileSystemId: aws.String(d.Id()),
LustreConfiguration: &fsx.UpdateFileSystemLustreConfiguration{},
}

if d.HasChange("weekly_maintenance_start_time") {
input.LustreConfiguration.WeeklyMaintenanceStartTime = aws.String(d.Get("weekly_maintenance_start_time").(string))
requestUpdate = true
}
if d.HasChange("weekly_maintenance_start_time") {
input.LustreConfiguration.WeeklyMaintenanceStartTime = aws.String(d.Get("weekly_maintenance_start_time").(string))
}

if d.HasChange("automatic_backup_retention_days") {
input.LustreConfiguration.AutomaticBackupRetentionDays = aws.Int64(int64(d.Get("automatic_backup_retention_days").(int)))
requestUpdate = true
}
if d.HasChange("automatic_backup_retention_days") {
input.LustreConfiguration.AutomaticBackupRetentionDays = aws.Int64(int64(d.Get("automatic_backup_retention_days").(int)))
}

if d.HasChange("daily_automatic_backup_start_time") {
input.LustreConfiguration.DailyAutomaticBackupStartTime = aws.String(d.Get("daily_automatic_backup_start_time").(string))
requestUpdate = true
}
if d.HasChange("daily_automatic_backup_start_time") {
input.LustreConfiguration.DailyAutomaticBackupStartTime = aws.String(d.Get("daily_automatic_backup_start_time").(string))
}

if d.HasChange("auto_import_policy") {
input.LustreConfiguration.AutoImportPolicy = aws.String(d.Get("auto_import_policy").(string))
requestUpdate = true
}
if d.HasChange("auto_import_policy") {
input.LustreConfiguration.AutoImportPolicy = aws.String(d.Get("auto_import_policy").(string))
}

if d.HasChange("storage_capacity") {
input.StorageCapacity = aws.Int64(int64(d.Get("storage_capacity").(int)))
requestUpdate = true
}
if d.HasChange("storage_capacity") {
input.StorageCapacity = aws.Int64(int64(d.Get("storage_capacity").(int)))
}

if v, ok := d.GetOk("data_compression_type"); ok {
input.LustreConfiguration.DataCompressionType = aws.String(v.(string))
}

if requestUpdate {
_, err := conn.UpdateFileSystem(input)
if err != nil {
return fmt.Errorf("error updating FSX Lustre File System (%s): %w", d.Id(), err)
Expand Down Expand Up @@ -397,10 +405,10 @@ func resourceAwsFsxLustreFileSystemRead(d *schema.ResourceData, meta interface{}
if lustreConfig.PerUnitStorageThroughput != nil {
d.Set("per_unit_storage_throughput", lustreConfig.PerUnitStorageThroughput)
}
d.Set("mount_name", filesystem.LustreConfiguration.MountName)
d.Set("mount_name", lustreConfig.MountName)
d.Set("storage_type", filesystem.StorageType)
if filesystem.LustreConfiguration.DriveCacheType != nil {
d.Set("drive_cache_type", filesystem.LustreConfiguration.DriveCacheType)
if lustreConfig.DriveCacheType != nil {
d.Set("drive_cache_type", lustreConfig.DriveCacheType)
}

if filesystem.KmsKeyId != nil {
Expand Down Expand Up @@ -433,7 +441,8 @@ func resourceAwsFsxLustreFileSystemRead(d *schema.ResourceData, meta interface{}
d.Set("weekly_maintenance_start_time", lustreConfig.WeeklyMaintenanceStartTime)
d.Set("automatic_backup_retention_days", lustreConfig.AutomaticBackupRetentionDays)
d.Set("daily_automatic_backup_start_time", lustreConfig.DailyAutomaticBackupStartTime)
d.Set("copy_tags_to_backups", filesystem.LustreConfiguration.CopyTagsToBackups)
d.Set("copy_tags_to_backups", lustreConfig.CopyTagsToBackups)
d.Set("data_compression_type", lustreConfig.DataCompressionType)

return nil
}
Expand Down
53 changes: 53 additions & 0 deletions aws/resource_aws_fsx_lustre_file_system_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -107,6 +107,7 @@ func TestAccAWSFsxLustreFileSystem_basic(t *testing.T) {
resource.TestCheckResourceAttr(resourceName, "automatic_backup_retention_days", "0"),
resource.TestCheckResourceAttr(resourceName, "storage_type", fsx.StorageTypeSsd),
resource.TestCheckResourceAttr(resourceName, "copy_tags_to_backups", "false"),
resource.TestCheckResourceAttr(resourceName, "data_compression_type", fsx.DataCompressionTypeNone),
),
},
{
Expand Down Expand Up @@ -141,6 +142,47 @@ func TestAccAWSFsxLustreFileSystem_disappears(t *testing.T) {
})
}

// TestAccAWSFsxLustreFileSystem_dataCompression exercises the
// data_compression_type argument end to end: create a file system with
// LZ4 compression, verify import round-trips, revert to the default
// (NONE), then re-enable LZ4 on the same file system in place.
func TestAccAWSFsxLustreFileSystem_dataCompression(t *testing.T) {
	var fs fsx.FileSystem
	resourceName := "aws_fsx_lustre_file_system.test"

	// checkCompression asserts the file system still exists and that the
	// data_compression_type attribute holds the expected value.
	checkCompression := func(want string) resource.TestCheckFunc {
		return resource.ComposeTestCheckFunc(
			testAccCheckFsxLustreFileSystemExists(resourceName, &fs),
			resource.TestCheckResourceAttr(resourceName, "data_compression_type", want),
		)
	}

	resource.ParallelTest(t, resource.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
			testAccPartitionHasServicePreCheck(fsx.EndpointsID, t)
		},
		ErrorCheck:   testAccErrorCheck(t, fsx.EndpointsID),
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckFsxLustreFileSystemDestroy,
		Steps: []resource.TestStep{
			{
				Config: testAccAwsFsxLustreFileSystemConfigCompression(),
				Check:  checkCompression(fsx.DataCompressionTypeLz4),
			},
			{
				ResourceName:            resourceName,
				ImportState:             true,
				ImportStateVerify:       true,
				ImportStateVerifyIgnore: []string{"security_group_ids"},
			},
			{
				// Config without data_compression_type: the argument's
				// default should revert compression to NONE.
				Config: testAccAwsFsxLustreFileSystemConfigSubnetIds1(),
				Check:  checkCompression(fsx.DataCompressionTypeNone),
			},
			{
				Config: testAccAwsFsxLustreFileSystemConfigCompression(),
				Check:  checkCompression(fsx.DataCompressionTypeLz4),
			},
		},
	})
}

func TestAccAWSFsxLustreFileSystem_ExportPath(t *testing.T) {
var filesystem1, filesystem2 fsx.FileSystem
resourceName := "aws_fsx_lustre_file_system.test"
Expand Down Expand Up @@ -1151,3 +1193,14 @@ resource "aws_fsx_lustre_file_system" "test" {
}
`)
}

// testAccAwsFsxLustreFileSystemConfigCompression returns an acceptance-test
// configuration for a Lustre file system with LZ4 data compression enabled,
// layered on top of the shared base (VPC/subnet) configuration.
func testAccAwsFsxLustreFileSystemConfigCompression() string {
return composeConfig(testAccAwsFsxLustreFileSystemConfigBase(), `
resource "aws_fsx_lustre_file_system" "test" {
storage_capacity = 1200
subnet_ids = [aws_subnet.test1.id]
deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? "SCRATCH_2" : null # GovCloud does not support SCRATCH_1
data_compression_type = "LZ4"
}
`)
}
1 change: 1 addition & 0 deletions website/docs/r/fsx_lustre_file_system.html.markdown
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@ The following arguments are supported:
* `daily_automatic_backup_start_time` - (Optional) A recurring daily time, in the format HH:MM. HH is the zero-padded hour of the day (0-23), and MM is the zero-padded minute of the hour. For example, 05:00 specifies 5 AM daily. only valid for `PERSISTENT_1` deployment_type. Requires `automatic_backup_retention_days` to be set.
* `auto_import_policy` - (Optional) How Amazon FSx keeps your file and directory listings up to date as you add or modify objects in your linked S3 bucket. see [Auto Import Data Repo](https://docs.aws.amazon.com/fsx/latest/LustreGuide/autoimport-data-repo.html) for more details.
* `copy_tags_to_backups` - (Optional) A boolean flag indicating whether tags for the file system should be copied to backups. Applicable for `PERSISTENT_1` deployment_type. The default value is false.
* `data_compression_type` - (Optional) Sets the data compression configuration for the file system. Valid values are `LZ4` and `NONE`. Default value is `NONE`. Unsetting this value reverts the compression type back to `NONE`.

## Attributes Reference

Expand Down