From 7f98c97359f637ff32c2ade8d6d4890c9fdefedb Mon Sep 17 00:00:00 2001 From: AWS SDK For Ruby Date: Tue, 9 Jul 2024 18:14:56 +0000 Subject: [PATCH] Updated API models and rebuilt service gems. --- apis/datazone/2018-05-10/api-2.json | 16 +- apis/fsx/2018-03-01/api-2.json | 14 +- apis/fsx/2018-03-01/docs-2.json | 31 +- apis/opensearch/2021-01-01/api-2.json | 63 +- apis/opensearch/2021-01-01/docs-2.json | 46 ++ apis/sagemaker/2017-07-24/api-2.json | 403 +++++++++- apis/sagemaker/2017-07-24/docs-2.json | 282 ++++++- apis/sagemaker/2017-07-24/paginators-1.json | 6 + gems/aws-partitions/CHANGELOG.md | 5 + gems/aws-partitions/VERSION | 2 +- gems/aws-partitions/partitions.json | 33 +- gems/aws-sdk-datazone/CHANGELOG.md | 5 + gems/aws-sdk-datazone/VERSION | 2 +- gems/aws-sdk-datazone/lib/aws-sdk-datazone.rb | 2 +- .../lib/aws-sdk-datazone/client.rb | 2 +- .../lib/aws-sdk-datazone/client_api.rb | 6 +- gems/aws-sdk-fsx/CHANGELOG.md | 5 + gems/aws-sdk-fsx/VERSION | 2 +- gems/aws-sdk-fsx/lib/aws-sdk-fsx.rb | 2 +- gems/aws-sdk-fsx/lib/aws-sdk-fsx/client.rb | 177 ++--- .../aws-sdk-fsx/lib/aws-sdk-fsx/client_api.rb | 1 + gems/aws-sdk-fsx/lib/aws-sdk-fsx/types.rb | 194 +++-- gems/aws-sdk-fsx/sig/client.rbs | 9 +- gems/aws-sdk-fsx/sig/types.rbs | 13 +- gems/aws-sdk-opensearchservice/CHANGELOG.md | 5 + gems/aws-sdk-opensearchservice/VERSION | 2 +- .../lib/aws-sdk-opensearchservice.rb | 2 +- .../lib/aws-sdk-opensearchservice/client.rb | 42 +- .../aws-sdk-opensearchservice/client_api.rb | 28 + .../lib/aws-sdk-opensearchservice/types.rb | 119 ++- gems/aws-sdk-opensearchservice/sig/client.rbs | 10 + gems/aws-sdk-opensearchservice/sig/types.rbs | 31 + gems/aws-sdk-sagemaker/CHANGELOG.md | 5 + gems/aws-sdk-sagemaker/VERSION | 2 +- .../lib/aws-sdk-sagemaker.rb | 2 +- .../lib/aws-sdk-sagemaker/client.rb | 433 ++++++++++- .../lib/aws-sdk-sagemaker/client_api.rb | 223 ++++++ .../lib/aws-sdk-sagemaker/endpoints.rb | 70 ++ .../aws-sdk-sagemaker/plugins/endpoints.rb | 10 + .../lib/aws-sdk-sagemaker/types.rb | 697 +++++++++++++++++- gems/aws-sdk-sagemaker/sig/client.rbs | 151 ++++ gems/aws-sdk-sagemaker/sig/types.rbs | 163 ++++ 42 files changed, 3074 insertions(+), 242 deletions(-) diff --git a/apis/datazone/2018-05-10/api-2.json b/apis/datazone/2018-05-10/api-2.json index d8a12046e55..3d6b52272a1 100644 --- a/apis/datazone/2018-05-10/api-2.json +++ b/apis/datazone/2018-05-10/api-2.json @@ -3698,11 +3698,15 @@ "members":{ "domainId":{"shape":"DomainId"}, "itemId":{"shape":"DataProductId"} - } + }, + "deprecated":true, + "deprecatedMessage":"This structure is deprecated." }, "DataProductItems":{ "type":"list", "member":{"shape":"DataProductItem"}, + "deprecated":true, + "deprecatedMessage":"This structure is deprecated.", "max":100, "min":0 }, @@ -3732,7 +3736,9 @@ "owningProjectId":{"shape":"ProjectId"}, "updatedAt":{"shape":"UpdatedAt"}, "updatedBy":{"shape":"UpdatedBy"} - } + }, + "deprecated":true, + "deprecatedMessage":"This structure is deprecated." }, "DataSourceConfigurationInput":{ "type":"structure", @@ -8293,7 +8299,11 @@ "type":"structure", "members":{ "assetItem":{"shape":"AssetItem"}, - "dataProductItem":{"shape":"DataProductSummary"}, + "dataProductItem":{ + "shape":"DataProductSummary", + "deprecated":true, + "deprecatedMessage":"This field is deprecated." 
+ }, "glossaryItem":{"shape":"GlossaryItem"}, "glossaryTermItem":{"shape":"GlossaryTermItem"} }, diff --git a/apis/fsx/2018-03-01/api-2.json b/apis/fsx/2018-03-01/api-2.json index 6e27d57cc92..6f7d226ebc2 100644 --- a/apis/fsx/2018-03-01/api-2.json +++ b/apis/fsx/2018-03-01/api-2.json @@ -847,7 +847,8 @@ "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", - "VOLUME_INITIALIZE_WITH_SNAPSHOT" + "VOLUME_INITIALIZE_WITH_SNAPSHOT", + "DOWNLOAD_DATA_FROM_BACKUP" ] }, "AdministrativeActions":{ @@ -2979,7 +2980,8 @@ "enum":[ "MULTI_AZ_1", "SINGLE_AZ_1", - "SINGLE_AZ_2" + "SINGLE_AZ_2", + "MULTI_AZ_2" ] }, "OntapEndpointIpAddresses":{ @@ -3089,6 +3091,8 @@ "enum":[ "SINGLE_AZ_1", "SINGLE_AZ_2", + "SINGLE_AZ_HA_1", + "SINGLE_AZ_HA_2", "MULTI_AZ_1" ] }, @@ -3615,7 +3619,8 @@ "IN_PROGRESS", "PENDING", "COMPLETED", - "UPDATED_OPTIMIZING" + "UPDATED_OPTIMIZING", + "OPTIMIZING" ] }, "StorageCapacity":{ @@ -3972,7 +3977,8 @@ "ThroughputCapacity":{"shape":"MegabytesPerSecond"}, "AddRouteTableIds":{"shape":"RouteTableIds"}, "RemoveRouteTableIds":{"shape":"RouteTableIds"}, - "ThroughputCapacityPerHAPair":{"shape":"ThroughputCapacityPerHAPair"} + "ThroughputCapacityPerHAPair":{"shape":"ThroughputCapacityPerHAPair"}, + "HAPairs":{"shape":"HAPairs"} } }, "UpdateFileSystemOpenZFSConfiguration":{ diff --git a/apis/fsx/2018-03-01/docs-2.json b/apis/fsx/2018-03-01/docs-2.json index 1c12c0b2363..9cc606ce271 100644 --- a/apis/fsx/2018-03-01/docs-2.json +++ b/apis/fsx/2018-03-01/docs-2.json @@ -107,7 +107,7 @@ } }, "AdministrativeActionType": { - "base": "

Describes the type of administrative action, as follows:

", + "base": "

Describes the type of administrative action, as follows:

", "refs": { "AdministrativeAction$AdministrativeActionType": null } @@ -143,7 +143,7 @@ "Aggregates": { "base": null, "refs": { - "AggregateConfiguration$Aggregates": "

The list of aggregates that this volume resides on. Aggregates are storage pools which make up your primary storage tier. Each high-availability (HA) pair has one aggregate. The names of the aggregates map to the names of the aggregates in the ONTAP CLI and REST API. For FlexVols, there will always be a single entry.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

", + "AggregateConfiguration$Aggregates": "

The list of aggregates that this volume resides on. Aggregates are storage pools which make up your primary storage tier. Each high-availability (HA) pair has one aggregate. The names of the aggregates map to the names of the aggregates in the ONTAP CLI and REST API. For FlexVols, there will always be a single entry.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

", "CreateAggregateConfiguration$Aggregates": "

Used to specify the names of aggregates on which the volume will be created.

" } }, @@ -1259,7 +1259,7 @@ "base": "

A structure providing details of any failures that occurred.

", "refs": { "FileCache$FailureDetails": "

A structure providing details of any failures that occurred.

", - "FileCacheCreating$FailureDetails": "

A structure providing details of any failures that occurred.

" + "FileCacheCreating$FailureDetails": "

A structure providing details of any failures that occurred in creating a cache.

" } }, "FileCacheId": { @@ -1549,8 +1549,9 @@ "HAPairs": { "base": null, "refs": { - "CreateFileSystemOntapConfiguration$HAPairs": "

Specifies how many high-availability (HA) pairs of file servers will power your file system. Scale-up file systems are powered by 1 HA pair. The default value is 1. FSx for ONTAP scale-out file systems are powered by up to 12 HA pairs. The value of this property affects the values of StorageCapacity, Iops, and ThroughputCapacity. For more information, see High-availability (HA) pairs in the FSx for ONTAP user guide.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

", - "OntapFileSystemConfiguration$HAPairs": "

Specifies how many high-availability (HA) file server pairs the file system will have. The default value is 1. The value of this property affects the values of StorageCapacity, Iops, and ThroughputCapacity. For more information, see High-availability (HA) pairs in the FSx for ONTAP user guide.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

" + "CreateFileSystemOntapConfiguration$HAPairs": "

Specifies how many high-availability (HA) pairs of file servers will power your file system. First-generation file systems are powered by 1 HA pair. Second-generation multi-AZ file systems are powered by 1 HA pair. Second-generation single-AZ file systems are powered by up to 12 HA pairs. The default value is 1. The value of this property affects the values of StorageCapacity, Iops, and ThroughputCapacity. For more information, see High-availability (HA) pairs in the FSx for ONTAP user guide. Block storage protocol support (iSCSI and NVMe over TCP) is disabled on file systems with more than 6 HA pairs. For more information, see Using block storage protocols.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

", + "OntapFileSystemConfiguration$HAPairs": "

Specifies how many high-availability (HA) file server pairs the file system will have. The default value is 1. The value of this property affects the values of StorageCapacity, Iops, and ThroughputCapacity. For more information, see High-availability (HA) pairs in the FSx for ONTAP user guide.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

", + "UpdateFileSystemOntapConfiguration$HAPairs": "

Use to update the number of high-availability (HA) pairs for a second-generation single-AZ file system. If you increase the number of HA pairs for your file system, you must specify proportional increases for StorageCapacity, Iops, and ThroughputCapacity. For more information, see High-availability (HA) pairs in the FSx for ONTAP user guide. Block storage protocol support (iSCSI and NVMe over TCP) is disabled on file systems with more than 6 HA pairs. For more information, see Using block storage protocols.

" } }, "IncludeShared": { @@ -1943,8 +1944,8 @@ "OntapDeploymentType": { "base": null, "refs": { - "CreateFileSystemOntapConfiguration$DeploymentType": "

Specifies the FSx for ONTAP file system deployment type to use in creating the file system.

For information about the use cases for Multi-AZ and Single-AZ deployments, refer to Choosing a file system deployment type.

", - "OntapFileSystemConfiguration$DeploymentType": "

Specifies the FSx for ONTAP file system deployment type in use in the file system.

For information about the use cases for Multi-AZ and Single-AZ deployments, refer to Choosing Multi-AZ or Single-AZ file system deployment.

" + "CreateFileSystemOntapConfiguration$DeploymentType": "

Specifies the FSx for ONTAP file system deployment type to use in creating the file system.

For information about the use cases for Multi-AZ and Single-AZ deployments, refer to Choosing a file system deployment type.

", + "OntapFileSystemConfiguration$DeploymentType": "

Specifies the FSx for ONTAP file system deployment type in use in the file system.

For information about the use cases for Multi-AZ and Single-AZ deployments, refer to Choosing Multi-AZ or Single-AZ file system deployment.

" } }, "OntapEndpointIpAddresses": { @@ -2017,8 +2018,8 @@ "OpenZFSDeploymentType": { "base": null, "refs": { - "CreateFileSystemOpenZFSConfiguration$DeploymentType": "

Specifies the file system deployment type. Single AZ deployment types are configured for redundancy within a single Availability Zone in an Amazon Web Services Region . Valid values are the following:

For more information, see Deployment type availability and File system performance in the Amazon FSx for OpenZFS User Guide.

", - "OpenZFSFileSystemConfiguration$DeploymentType": "

Specifies the file-system deployment type. Amazon FSx for OpenZFS supports
 MULTI_AZ_1, SINGLE_AZ_1, and SINGLE_AZ_2.

" + "CreateFileSystemOpenZFSConfiguration$DeploymentType": "

Specifies the file system deployment type. Valid values are the following:

For a list of which Amazon Web Services Regions each deployment type is available in, see Deployment type availability. For more information on the differences in performance between deployment types, see File system performance in the Amazon FSx for OpenZFS User Guide.

", + "OpenZFSFileSystemConfiguration$DeploymentType": "

Specifies the file-system deployment type. Amazon FSx for OpenZFS supports MULTI_AZ_1, SINGLE_AZ_HA_2, SINGLE_AZ_HA_1, SINGLE_AZ_2, and SINGLE_AZ_1.

" } }, "OpenZFSFileSystemConfiguration": { @@ -2122,7 +2123,7 @@ "ProgressPercent": { "base": "

Displays the current percent of progress of an asynchronous task.

", "refs": { - "AdministrativeAction$ProgressPercent": "

The percentage-complete status of a STORAGE_OPTIMIZATION administrative action. Does not apply to any other administrative action type.

", + "AdministrativeAction$ProgressPercent": "

The percentage-complete status of a STORAGE_OPTIMIZATION or DOWNLOAD_DATA_FROM_BACKUP administrative action. Does not apply to any other administrative action type.

", "Backup$ProgressPercent": null } }, @@ -2505,7 +2506,7 @@ "Status": { "base": null, "refs": { - "AdministrativeAction$Status": "

The status of the administrative action, as follows:

" + "AdministrativeAction$Status": "

The status of the administrative action, as follows:

" } }, "StorageCapacity": { @@ -2633,7 +2634,7 @@ "SubnetId": { "base": "

The ID for a subnet. A subnet is a range of IP addresses in your virtual private cloud (VPC). For more information, see VPC and subnets in the Amazon VPC User Guide.

", "refs": { - "CreateFileSystemOntapConfiguration$PreferredSubnetId": "

Required when DeploymentType is set to MULTI_AZ_1. This specifies the subnet in which you want the preferred file server to be located.

", + "CreateFileSystemOntapConfiguration$PreferredSubnetId": "

Required when DeploymentType is set to MULTI_AZ_1 or MULTI_AZ_2. This specifies the subnet in which you want the preferred file server to be located.

", "CreateFileSystemOpenZFSConfiguration$PreferredSubnetId": "

Required when DeploymentType is set to MULTI_AZ_1. This specifies the subnet in which you want the preferred file server to be located.

", "CreateFileSystemWindowsConfiguration$PreferredSubnetId": "

Required when DeploymentType is set to MULTI_AZ_1. This specifies the subnet in which you want the preferred file server to be located. For in-Amazon Web Services applications, we recommend that you launch your clients in the same Availability Zone (AZ) as your preferred file server to reduce cross-AZ data transfer costs and minimize latency.

", "InvalidNetworkSettings$InvalidSubnetId": "

The subnet ID that is either invalid or not part of the VPC specified.

", @@ -2768,9 +2769,9 @@ "ThroughputCapacityPerHAPair": { "base": null, "refs": { - "CreateFileSystemOntapConfiguration$ThroughputCapacityPerHAPair": "

Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.

You can define either the ThroughputCapacityPerHAPair or the ThroughputCapacity when creating a file system, but not both.

This field and ThroughputCapacity are the same for scale-up file systems powered by one HA pair.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

", - "OntapFileSystemConfiguration$ThroughputCapacityPerHAPair": "

Use to choose the throughput capacity per HA pair. When the value of HAPairs is equal to 1, the value of ThroughputCapacityPerHAPair is the total throughput for the file system.

This field and ThroughputCapacity cannot be defined in the same API call, but one is required.

This field and ThroughputCapacity are the same for file systems with one HA pair.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

", - "UpdateFileSystemOntapConfiguration$ThroughputCapacityPerHAPair": "

Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.

This field and ThroughputCapacity cannot be defined in the same API call, but one is required.

This field and ThroughputCapacity are the same for file systems with one HA pair.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

" + "CreateFileSystemOntapConfiguration$ThroughputCapacityPerHAPair": "

Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.

You can define either the ThroughputCapacityPerHAPair or the ThroughputCapacity when creating a file system, but not both.

This field and ThroughputCapacity are the same for file systems powered by one HA pair.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

", + "OntapFileSystemConfiguration$ThroughputCapacityPerHAPair": "

Use to choose the throughput capacity per HA pair. When the value of HAPairs is equal to 1, the value of ThroughputCapacityPerHAPair is the total throughput for the file system.

This field and ThroughputCapacity cannot be defined in the same API call, but one is required.

This field and ThroughputCapacity are the same for file systems with one HA pair.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

", + "UpdateFileSystemOntapConfiguration$ThroughputCapacityPerHAPair": "

Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.

This field and ThroughputCapacity cannot be defined in the same API call, but one is required.

This field and ThroughputCapacity are the same for file systems with one HA pair.

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

" } }, "TieringPolicy": { diff --git a/apis/opensearch/2021-01-01/api-2.json b/apis/opensearch/2021-01-01/api-2.json index ae094168fe8..b1bae1abcc1 100644 --- a/apis/opensearch/2021-01-01/api-2.json +++ b/apis/opensearch/2021-01-01/api-2.json @@ -1000,6 +1000,25 @@ } }, "shapes":{ + "AIMLOptionsInput":{ + "type":"structure", + "members":{ + "NaturalLanguageQueryGenerationOptions":{"shape":"NaturalLanguageQueryGenerationOptionsInput"} + } + }, + "AIMLOptionsOutput":{ + "type":"structure", + "members":{ + "NaturalLanguageQueryGenerationOptions":{"shape":"NaturalLanguageQueryGenerationOptionsOutput"} + } + }, + "AIMLOptionsStatus":{ + "type":"structure", + "members":{ + "Options":{"shape":"AIMLOptionsOutput"}, + "Status":{"shape":"OptionStatus"} + } + }, "ARN":{ "type":"string", "max":2048, @@ -1619,7 +1638,8 @@ "TagList":{"shape":"TagList"}, "AutoTuneOptions":{"shape":"AutoTuneOptionsInput"}, "OffPeakWindowOptions":{"shape":"OffPeakWindowOptions"}, - "SoftwareUpdateOptions":{"shape":"SoftwareUpdateOptions"} + "SoftwareUpdateOptions":{"shape":"SoftwareUpdateOptions"}, + "AIMLOptions":{"shape":"AIMLOptionsInput"} } }, "CreateDomainResponse":{ @@ -2278,7 +2298,8 @@ "ChangeProgressDetails":{"shape":"ChangeProgressDetails"}, "OffPeakWindowOptions":{"shape":"OffPeakWindowOptionsStatus"}, "SoftwareUpdateOptions":{"shape":"SoftwareUpdateOptionsStatus"}, - "ModifyingProperties":{"shape":"ModifyingPropertiesList"} + "ModifyingProperties":{"shape":"ModifyingPropertiesList"}, + "AIMLOptions":{"shape":"AIMLOptionsStatus"} } }, "DomainEndpointOptions":{ @@ -2471,7 +2492,8 @@ "OffPeakWindowOptions":{"shape":"OffPeakWindowOptions"}, "SoftwareUpdateOptions":{"shape":"SoftwareUpdateOptions"}, "DomainProcessingStatus":{"shape":"DomainProcessingStatusType"}, - "ModifyingProperties":{"shape":"ModifyingPropertiesList"} + "ModifyingProperties":{"shape":"ModifyingPropertiesList"}, + "AIMLOptions":{"shape":"AIMLOptionsOutput"} } }, "DomainStatusList":{ @@ -3365,6 +3387,38 @@ "type":"list", "member":{"shape":"ModifyingProperties"} }, + "NaturalLanguageQueryGenerationCurrentState":{ + "type":"string", + "enum":[ + "NOT_ENABLED", + "ENABLE_COMPLETE", + "ENABLE_IN_PROGRESS", + "ENABLE_FAILED", + "DISABLE_COMPLETE", + "DISABLE_IN_PROGRESS", + "DISABLE_FAILED" + ] + }, + "NaturalLanguageQueryGenerationDesiredState":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "NaturalLanguageQueryGenerationOptionsInput":{ + "type":"structure", + "members":{ + "DesiredState":{"shape":"NaturalLanguageQueryGenerationDesiredState"} + } + }, + "NaturalLanguageQueryGenerationOptionsOutput":{ + "type":"structure", + "members":{ + "DesiredState":{"shape":"NaturalLanguageQueryGenerationDesiredState"}, + "CurrentState":{"shape":"NaturalLanguageQueryGenerationCurrentState"} + } + }, "NextToken":{"type":"string"}, "NodeId":{ "type":"string", @@ -4309,7 +4363,8 @@ "DryRun":{"shape":"DryRun"}, "DryRunMode":{"shape":"DryRunMode"}, "OffPeakWindowOptions":{"shape":"OffPeakWindowOptions"}, - "SoftwareUpdateOptions":{"shape":"SoftwareUpdateOptions"} + "SoftwareUpdateOptions":{"shape":"SoftwareUpdateOptions"}, + "AIMLOptions":{"shape":"AIMLOptionsInput"} } }, "UpdateDomainConfigResponse":{ diff --git a/apis/opensearch/2021-01-01/docs-2.json b/apis/opensearch/2021-01-01/docs-2.json index af8019750ea..41f9e3dbba3 100644 --- a/apis/opensearch/2021-01-01/docs-2.json +++ b/apis/opensearch/2021-01-01/docs-2.json @@ -67,6 +67,26 @@ "UpgradeDomain": "

Allows you to either upgrade your Amazon OpenSearch Service domain or perform an upgrade eligibility check to a compatible version of OpenSearch or Elasticsearch.

" }, "shapes": { + "AIMLOptionsInput": { + "base": "

Container for parameters required to enable all machine learning features.

", + "refs": { + "CreateDomainRequest$AIMLOptions": "

Options for all machine learning features for the specified domain.

", + "UpdateDomainConfigRequest$AIMLOptions": "

Options for all machine learning features for the specified domain.

" + } + }, + "AIMLOptionsOutput": { + "base": "

Container for parameters representing the state of machine learning features on the specified domain.

", + "refs": { + "AIMLOptionsStatus$Options": "

Machine learning options on the specified domain.

", + "DomainStatus$AIMLOptions": "

Container for parameters required to enable all machine learning features.

" + } + }, + "AIMLOptionsStatus": { + "base": "

The status of machine learning options on the specified domain.

", + "refs": { + "DomainConfig$AIMLOptions": "

Container for parameters required to enable all machine learning features.

" + } + }, "ARN": { "base": "

The Amazon Resource Name (ARN) of the domain. See Identifiers for IAM Entities in Using Amazon Web Services Identity and Access Management for more information.

", "refs": { @@ -1894,6 +1914,31 @@ "DomainStatus$ModifyingProperties": "

Information about the domain properties that are currently being modified.

" } }, + "NaturalLanguageQueryGenerationCurrentState": { + "base": null, + "refs": { + "NaturalLanguageQueryGenerationOptionsOutput$CurrentState": "

The current state of the natural language query generation feature, indicating completion, in progress, or failure.

" + } + }, + "NaturalLanguageQueryGenerationDesiredState": { + "base": null, + "refs": { + "NaturalLanguageQueryGenerationOptionsInput$DesiredState": "

The desired state of the natural language query generation feature. Valid values are ENABLED and DISABLED.

", + "NaturalLanguageQueryGenerationOptionsOutput$DesiredState": "

The desired state of the natural language query generation feature. Valid values are ENABLED and DISABLED.

" + } + }, + "NaturalLanguageQueryGenerationOptionsInput": { + "base": "

Container for parameters required to enable the natural language query generation feature.

", + "refs": { + "AIMLOptionsInput$NaturalLanguageQueryGenerationOptions": "

Container for parameters required for natural language query generation on the specified domain.

" + } + }, + "NaturalLanguageQueryGenerationOptionsOutput": { + "base": "

Container for parameters representing the state of the natural language query generation feature on the specified domain.

", + "refs": { + "AIMLOptionsOutput$NaturalLanguageQueryGenerationOptions": "

Container for parameters required for natural language query generation on the specified domain.

" + } + }, "NextToken": { "base": "

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

", "refs": { @@ -2045,6 +2090,7 @@ "OptionStatus": { "base": "

Provides the current status of an entity.

", "refs": { + "AIMLOptionsStatus$Status": null, "AccessPoliciesStatus$Status": "

The status of the access policy for the domain.

", "AdvancedOptionsStatus$Status": "

The status of advanced options for the specified domain.

", "AdvancedSecurityOptionsStatus$Status": "

Status of the fine-grained access control settings for a domain.

", diff --git a/apis/sagemaker/2017-07-24/api-2.json b/apis/sagemaker/2017-07-24/api-2.json index ed84785734e..113d8706f01 100644 --- a/apis/sagemaker/2017-07-24/api-2.json +++ b/apis/sagemaker/2017-07-24/api-2.json @@ -608,6 +608,19 @@ {"shape":"ResourceLimitExceeded"} ] }, + "CreateOptimizationJob":{ + "name":"CreateOptimizationJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateOptimizationJobRequest"}, + "output":{"shape":"CreateOptimizationJobResponse"}, + "errors":[ + {"shape":"ResourceInUse"}, + {"shape":"ResourceLimitExceeded"} + ] + }, "CreatePipeline":{ "name":"CreatePipeline", "http":{ @@ -1240,6 +1253,17 @@ }, "input":{"shape":"DeleteNotebookInstanceLifecycleConfigInput"} }, + "DeleteOptimizationJob":{ + "name":"DeleteOptimizationJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteOptimizationJobRequest"}, + "errors":[ + {"shape":"ResourceNotFound"} + ] + }, "DeletePipeline":{ "name":"DeletePipeline", "http":{ @@ -1896,6 +1920,18 @@ "input":{"shape":"DescribeNotebookInstanceLifecycleConfigInput"}, "output":{"shape":"DescribeNotebookInstanceLifecycleConfigOutput"} }, + "DescribeOptimizationJob":{ + "name":"DescribeOptimizationJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOptimizationJobRequest"}, + "output":{"shape":"DescribeOptimizationJobResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ] + }, "DescribePipeline":{ "name":"DescribePipeline", "http":{ @@ -2718,6 +2754,15 @@ "input":{"shape":"ListNotebookInstancesInput"}, "output":{"shape":"ListNotebookInstancesOutput"} }, + "ListOptimizationJobs":{ + "name":"ListOptimizationJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListOptimizationJobsRequest"}, + "output":{"shape":"ListOptimizationJobsResponse"} + }, "ListPipelineExecutionSteps":{ "name":"ListPipelineExecutionSteps", "http":{ @@ -3203,6 +3248,17 @@ }, "input":{"shape":"StopNotebookInstanceInput"} }, + "StopOptimizationJob":{ + "name":"StopOptimizationJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopOptimizationJobRequest"}, + "errors":[ + {"shape":"ResourceNotFound"} + ] + }, "StopPipelineExecution":{ "name":"StopPipelineExecution", "http":{ @@ -3845,6 +3901,29 @@ "max":15, "min":1 }, + "AdditionalModelChannelName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[A-Za-z0-9\\.\\-_]+" + }, + "AdditionalModelDataSource":{ + "type":"structure", + "required":[ + "ChannelName", + "S3DataSource" + ], + "members":{ + "ChannelName":{"shape":"AdditionalModelChannelName"}, + "S3DataSource":{"shape":"S3ModelDataSource"} + } + }, + "AdditionalModelDataSources":{ + "type":"list", + "member":{"shape":"AdditionalModelDataSource"}, + "max":5, + "min":0 + }, "AdditionalS3DataSource":{ "type":"structure", "required":[ @@ -4028,6 +4107,13 @@ "ValidationProfiles":{"shape":"AlgorithmValidationProfiles"} } }, + "AmazonQSettings":{ + "type":"structure", + "members":{ + "Status":{"shape":"FeatureStatus"}, + "QProfileArn":{"shape":"QProfileArn"} + } + }, "AnnotationConsolidationConfig":{ "type":"structure", "required":["AnnotationConsolidationLambdaArn"], @@ -6350,6 +6436,7 @@ "Mode":{"shape":"ContainerMode"}, "ModelDataUrl":{"shape":"Url"}, "ModelDataSource":{"shape":"ModelDataSource"}, + "AdditionalModelDataSources":{"shape":"AdditionalModelDataSources"}, "Environment":{"shape":"EnvironmentMap"}, "ModelPackageName":{"shape":"VersionedArnOrName"}, 
"InferenceSpecificationName":{"shape":"InferenceSpecificationName"}, @@ -7553,6 +7640,37 @@ "NotebookInstanceArn":{"shape":"NotebookInstanceArn"} } }, + "CreateOptimizationJobRequest":{ + "type":"structure", + "required":[ + "OptimizationJobName", + "RoleArn", + "ModelSource", + "DeploymentInstanceType", + "OptimizationConfigs", + "OutputConfig", + "StoppingCondition" + ], + "members":{ + "OptimizationJobName":{"shape":"EntityName"}, + "RoleArn":{"shape":"RoleArn"}, + "ModelSource":{"shape":"OptimizationJobModelSource"}, + "DeploymentInstanceType":{"shape":"OptimizationJobDeploymentInstanceType"}, + "OptimizationEnvironment":{"shape":"OptimizationJobEnvironmentVariables"}, + "OptimizationConfigs":{"shape":"OptimizationConfigs"}, + "OutputConfig":{"shape":"OptimizationJobOutputConfig"}, + "StoppingCondition":{"shape":"StoppingCondition"}, + "Tags":{"shape":"TagList"}, + "VpcConfig":{"shape":"OptimizationVpcConfig"} + } + }, + "CreateOptimizationJobResponse":{ + "type":"structure", + "required":["OptimizationJobArn"], + "members":{ + "OptimizationJobArn":{"shape":"OptimizationJobArn"} + } + }, "CreatePipelineRequest":{ "type":"structure", "required":[ @@ -8638,6 +8756,13 @@ "NotebookInstanceLifecycleConfigName":{"shape":"NotebookInstanceLifecycleConfigName"} } }, + "DeleteOptimizationJobRequest":{ + "type":"structure", + "required":["OptimizationJobName"], + "members":{ + "OptimizationJobName":{"shape":"EntityName"} + } + }, "DeletePipelineRequest":{ "type":"structure", "required":[ @@ -10318,6 +10443,48 @@ "InstanceMetadataServiceConfiguration":{"shape":"InstanceMetadataServiceConfiguration"} } }, + "DescribeOptimizationJobRequest":{ + "type":"structure", + "required":["OptimizationJobName"], + "members":{ + "OptimizationJobName":{"shape":"EntityName"} + } + }, + "DescribeOptimizationJobResponse":{ + "type":"structure", + "required":[ + "OptimizationJobArn", + "OptimizationJobStatus", + "CreationTime", + "LastModifiedTime", + "OptimizationJobName", + "ModelSource", + "DeploymentInstanceType", + "OptimizationConfigs", + "OutputConfig", + "RoleArn", + "StoppingCondition" + ], + "members":{ + "OptimizationJobArn":{"shape":"OptimizationJobArn"}, + "OptimizationJobStatus":{"shape":"OptimizationJobStatus"}, + "OptimizationStartTime":{"shape":"Timestamp"}, + "OptimizationEndTime":{"shape":"Timestamp"}, + "CreationTime":{"shape":"CreationTime"}, + "LastModifiedTime":{"shape":"LastModifiedTime"}, + "FailureReason":{"shape":"FailureReason"}, + "OptimizationJobName":{"shape":"EntityName"}, + "ModelSource":{"shape":"OptimizationJobModelSource"}, + "OptimizationEnvironment":{"shape":"OptimizationJobEnvironmentVariables"}, + "DeploymentInstanceType":{"shape":"OptimizationJobDeploymentInstanceType"}, + "OptimizationConfigs":{"shape":"OptimizationConfigs"}, + "OutputConfig":{"shape":"OptimizationJobOutputConfig"}, + "OptimizationOutput":{"shape":"OptimizationOutput"}, + "RoleArn":{"shape":"RoleArn"}, + "StoppingCondition":{"shape":"StoppingCondition"}, + "VpcConfig":{"shape":"OptimizationVpcConfig"} + } + }, "DescribePipelineDefinitionForExecutionRequest":{ "type":"structure", "required":["PipelineExecutionArn"], @@ -11047,7 +11214,8 @@ "SecurityGroupIds":{"shape":"DomainSecurityGroupIds"}, "RStudioServerProDomainSettings":{"shape":"RStudioServerProDomainSettings"}, "ExecutionRoleIdentityConfig":{"shape":"ExecutionRoleIdentityConfig"}, - "DockerSettings":{"shape":"DockerSettings"} + "DockerSettings":{"shape":"DockerSettings"}, + "AmazonQSettings":{"shape":"AmazonQSettings"} } }, "DomainSettingsForUpdate":{ 
@@ -11056,7 +11224,8 @@ "RStudioServerProDomainSettingsForUpdate":{"shape":"RStudioServerProDomainSettingsForUpdate"}, "ExecutionRoleIdentityConfig":{"shape":"ExecutionRoleIdentityConfig"}, "SecurityGroupIds":{"shape":"DomainSecurityGroupIds"}, - "DockerSettings":{"shape":"DockerSettings"} + "DockerSettings":{"shape":"DockerSettings"}, + "AmazonQSettings":{"shape":"AmazonQSettings"} } }, "DomainStatus":{ @@ -15724,6 +15893,41 @@ "NotebookInstances":{"shape":"NotebookInstanceSummaryList"} } }, + "ListOptimizationJobsRequest":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"NextToken"}, + "MaxResults":{ + "shape":"MaxResults", + "box":true + }, + "CreationTimeAfter":{"shape":"CreationTime"}, + "CreationTimeBefore":{"shape":"CreationTime"}, + "LastModifiedTimeAfter":{"shape":"LastModifiedTime"}, + "LastModifiedTimeBefore":{"shape":"LastModifiedTime"}, + "OptimizationContains":{"shape":"NameContains"}, + "NameContains":{"shape":"NameContains"}, + "StatusEquals":{"shape":"OptimizationJobStatus"}, + "SortBy":{"shape":"ListOptimizationJobsSortBy"}, + "SortOrder":{"shape":"SortOrder"} + } + }, + "ListOptimizationJobsResponse":{ + "type":"structure", + "required":["OptimizationJobSummaries"], + "members":{ + "OptimizationJobSummaries":{"shape":"OptimizationJobSummaries"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListOptimizationJobsSortBy":{ + "type":"string", + "enum":[ + "Name", + "CreationTime", + "Status" + ] + }, "ListPipelineExecutionStepsRequest":{ "type":"structure", "members":{ @@ -16691,6 +16895,13 @@ "InvocationsMaxRetries":{"shape":"InvocationsMaxRetries"} } }, + "ModelCompilationConfig":{ + "type":"structure", + "members":{ + "Image":{"shape":"OptimizationContainerImage"}, + "OverrideEnvironment":{"shape":"OptimizationJobEnvironmentVariables"} + } + }, "ModelCompressionType":{ "type":"string", "enum":[ @@ -17247,6 +17458,13 @@ "GroundTruthS3Input":{"shape":"MonitoringGroundTruthS3Input"} } }, + "ModelQuantizationConfig":{ + "type":"structure", + "members":{ + "Image":{"shape":"OptimizationContainerImage"}, + "OverrideEnvironment":{"shape":"OptimizationJobEnvironmentVariables"} + } + }, "ModelRegisterSettings":{ "type":"structure", "members":{ @@ -18196,6 +18414,176 @@ "In" ] }, + "OptimizationConfig":{ + "type":"structure", + "members":{ + "ModelQuantizationConfig":{"shape":"ModelQuantizationConfig"}, + "ModelCompilationConfig":{"shape":"ModelCompilationConfig"} + }, + "union":true + }, + "OptimizationConfigs":{ + "type":"list", + "member":{"shape":"OptimizationConfig"}, + "max":10 + }, + "OptimizationContainerImage":{ + "type":"string", + "max":255, + "pattern":"[\\S]+" + }, + "OptimizationJobArn":{ + "type":"string", + "max":256, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:optimization-job/.*" + }, + "OptimizationJobDeploymentInstanceType":{ + "type":"string", + "enum":[ + "ml.p4d.24xlarge", + "ml.p4de.24xlarge", + "ml.p5.48xlarge", + "ml.g5.xlarge", + "ml.g5.2xlarge", + "ml.g5.4xlarge", + "ml.g5.8xlarge", + "ml.g5.12xlarge", + "ml.g5.16xlarge", + "ml.g5.24xlarge", + "ml.g5.48xlarge", + "ml.g6.xlarge", + "ml.g6.2xlarge", + "ml.g6.4xlarge", + "ml.g6.8xlarge", + "ml.g6.12xlarge", + "ml.g6.16xlarge", + "ml.g6.24xlarge", + "ml.g6.48xlarge", + "ml.inf2.xlarge", + "ml.inf2.8xlarge", + "ml.inf2.24xlarge", + "ml.inf2.48xlarge", + "ml.trn1.2xlarge", + "ml.trn1.32xlarge", + "ml.trn1n.32xlarge" + ] + }, + "OptimizationJobEnvironmentVariables":{ + "type":"map", + "key":{"shape":"NonEmptyString256"}, + "value":{"shape":"String256"}, + "max":25 + }, + 
"OptimizationJobModelSource":{ + "type":"structure", + "members":{ + "S3":{"shape":"OptimizationJobModelSourceS3"} + } + }, + "OptimizationJobModelSourceS3":{ + "type":"structure", + "members":{ + "S3Uri":{"shape":"S3Uri"}, + "ModelAccessConfig":{"shape":"OptimizationModelAccessConfig"} + } + }, + "OptimizationJobOutputConfig":{ + "type":"structure", + "required":["S3OutputLocation"], + "members":{ + "KmsKeyId":{"shape":"KmsKeyId"}, + "S3OutputLocation":{"shape":"S3Uri"} + } + }, + "OptimizationJobStatus":{ + "type":"string", + "enum":[ + "INPROGRESS", + "COMPLETED", + "FAILED", + "STARTING", + "STOPPING", + "STOPPED" + ] + }, + "OptimizationJobSummaries":{ + "type":"list", + "member":{"shape":"OptimizationJobSummary"} + }, + "OptimizationJobSummary":{ + "type":"structure", + "required":[ + "OptimizationJobName", + "OptimizationJobArn", + "CreationTime", + "OptimizationJobStatus", + "DeploymentInstanceType", + "OptimizationTypes" + ], + "members":{ + "OptimizationJobName":{"shape":"EntityName"}, + "OptimizationJobArn":{"shape":"OptimizationJobArn"}, + "CreationTime":{"shape":"CreationTime"}, + "OptimizationJobStatus":{"shape":"OptimizationJobStatus"}, + "OptimizationStartTime":{"shape":"Timestamp"}, + "OptimizationEndTime":{"shape":"Timestamp"}, + "LastModifiedTime":{"shape":"LastModifiedTime"}, + "DeploymentInstanceType":{"shape":"OptimizationJobDeploymentInstanceType"}, + "OptimizationTypes":{"shape":"OptimizationTypes"} + } + }, + "OptimizationModelAcceptEula":{"type":"boolean"}, + "OptimizationModelAccessConfig":{ + "type":"structure", + "required":["AcceptEula"], + "members":{ + "AcceptEula":{"shape":"OptimizationModelAcceptEula"} + } + }, + "OptimizationOutput":{ + "type":"structure", + "members":{ + "RecommendedInferenceImage":{"shape":"OptimizationContainerImage"} + } + }, + "OptimizationType":{"type":"string"}, + "OptimizationTypes":{ + "type":"list", + "member":{"shape":"OptimizationType"} + }, + "OptimizationVpcConfig":{ + "type":"structure", + "required":[ + "SecurityGroupIds", + "Subnets" + ], + "members":{ + "SecurityGroupIds":{"shape":"OptimizationVpcSecurityGroupIds"}, + "Subnets":{"shape":"OptimizationVpcSubnets"} + } + }, + "OptimizationVpcSecurityGroupId":{ + "type":"string", + "max":32, + "pattern":"[-0-9a-zA-Z]+" + }, + "OptimizationVpcSecurityGroupIds":{ + "type":"list", + "member":{"shape":"OptimizationVpcSecurityGroupId"}, + "max":5, + "min":1 + }, + "OptimizationVpcSubnetId":{ + "type":"string", + "max":32, + "pattern":"[-0-9a-zA-Z]+" + }, + "OptimizationVpcSubnets":{ + "type":"list", + "member":{"shape":"OptimizationVpcSubnetId"}, + "max":16, + "min":1 + }, "OptionalDouble":{"type":"double"}, "OptionalInteger":{"type":"integer"}, "OptionalVolumeSizeInGB":{ @@ -19529,6 +19917,10 @@ "ModelPackageGroupArn":{"shape":"ModelPackageGroupArn"} } }, + "QProfileArn":{ + "type":"string", + "pattern":"^arn:[-.a-z0-9]{1,63}:codewhisperer:([-.a-z0-9]{0,63}:){2}([a-zA-Z0-9-_:/]){1,1023}$" + }, "QualityCheckStepMetadata":{ "type":"structure", "members":{ @@ -21339,6 +21731,13 @@ "NotebookInstanceName":{"shape":"NotebookInstanceName"} } }, + "StopOptimizationJobRequest":{ + "type":"structure", + "required":["OptimizationJobName"], + "members":{ + "OptimizationJobName":{"shape":"EntityName"} + } + }, "StopPipelineExecutionRequest":{ "type":"structure", "required":[ diff --git a/apis/sagemaker/2017-07-24/docs-2.json b/apis/sagemaker/2017-07-24/docs-2.json index d448f805112..a5cc59e24da 100644 --- a/apis/sagemaker/2017-07-24/docs-2.json +++ b/apis/sagemaker/2017-07-24/docs-2.json @@ 
-50,6 +50,7 @@ "CreateMonitoringSchedule": "

Creates a schedule that regularly starts Amazon SageMaker Processing Jobs to monitor the data captured for an Amazon SageMaker Endpoint.

", "CreateNotebookInstance": "

Creates a SageMaker notebook instance. A notebook instance is a machine learning (ML) compute instance running the Jupyter Notebook App.

In a CreateNotebookInstance request, specify the type of ML compute instance that you want to run. SageMaker launches the instance, installs common libraries that you can use to explore datasets for model training, and attaches an ML storage volume to the notebook instance.

SageMaker also provides a set of example notebooks. Each notebook demonstrates how to use SageMaker with a specific algorithm or with a machine learning framework.

After receiving the request, SageMaker does the following:

  1. Creates a network interface in the SageMaker VPC.

  2. (Optional) If you specified SubnetId, SageMaker creates a network interface in your own VPC, which is inferred from the subnet ID that you provide in the input. When creating this network interface, SageMaker attaches the security group that you specified in the request to the network interface that it creates in your VPC.

  3. Launches an EC2 instance of the type specified in the request in the SageMaker VPC. If you specified SubnetId of your VPC, SageMaker specifies both network interfaces when launching this instance. This enables inbound traffic from your own VPC to the notebook instance, assuming that the security groups allow it.

After creating the notebook instance, SageMaker returns its Amazon Resource Name (ARN). You can't change the name of a notebook instance after you create it.

After SageMaker creates the notebook instance, you can connect to the Jupyter server and work in Jupyter notebooks. For example, you can write code to explore a dataset that you can use for model training, train a model, host models by creating SageMaker endpoints, and validate hosted models.

For more information, see How It Works.

", "CreateNotebookInstanceLifecycleConfig": "

Creates a lifecycle configuration that you can associate with a notebook instance. A lifecycle configuration is a collection of shell scripts that run when you create or start a notebook instance.

Each lifecycle configuration script has a limit of 16384 characters.

The value of the $PATH environment variable that is available to both scripts is /sbin:/bin:/usr/sbin:/usr/bin.

View Amazon CloudWatch Logs for notebook instance lifecycle configurations in log group /aws/sagemaker/NotebookInstances in log stream [notebook-instance-name]/[LifecycleConfigHook].

Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs for longer than 5 minutes, it fails and the notebook instance is not created or started.

For information about notebook instance lifecycle configurations, see Step 2.1: (Optional) Customize a Notebook Instance.

", + "CreateOptimizationJob": "

Creates a job that optimizes a model for inference performance. To create the job, you provide the location of a source model and the settings for the optimization techniques that you want the job to apply. When the job completes successfully, SageMaker uploads the new optimized model to the output destination that you specify.

For more information about how to use this action, and about the supported optimization techniques, see Optimize model inference with Amazon SageMaker.

", "CreatePipeline": "

Creates a pipeline using a JSON pipeline definition.

", "CreatePresignedDomainUrl": "

Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to the domain, and granted access to all of the Apps and files associated with the Domain's Amazon Elastic File System volume. This operation can only be called when the authentication mode equals IAM.

The IAM role or user passed to this API defines the permissions to access the app. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the app.

You can restrict access to this API and to the URL that it returns to a list of IP addresses, Amazon VPCs or Amazon VPC Endpoints that you specify. For more information, see Connect to Amazon SageMaker Studio Through an Interface VPC Endpoint .

The URL that you get from a call to CreatePresignedDomainUrl has a default timeout of 5 minutes. You can configure this value using ExpiresInSeconds. If you try to use the URL after the timeout limit expires, you are directed to the Amazon Web Services console sign-in page.

", "CreatePresignedMlflowTrackingServerUrl": "

Returns a presigned URL that you can use to connect to the MLflow UI attached to your tracking server. For more information, see Launch the MLflow UI using a presigned URL.

", @@ -106,6 +107,7 @@ "DeleteMonitoringSchedule": "

Deletes a monitoring schedule. Also stops the schedule if it had not already been stopped. This does not delete the job execution history of the monitoring schedule.

", "DeleteNotebookInstance": "

Deletes a SageMaker notebook instance. Before you can delete a notebook instance, you must call the StopNotebookInstance API.

When you delete a notebook instance, you lose all of your data. SageMaker removes the ML compute instance, and deletes the ML storage volume and the network interface associated with the notebook instance.

", "DeleteNotebookInstanceLifecycleConfig": "

Deletes a notebook instance lifecycle configuration.

", + "DeleteOptimizationJob": "

Deletes an optimization job.

", "DeletePipeline": "

Deletes a pipeline if there are no running instances of the pipeline. To delete a pipeline, you must stop all running instances of the pipeline using the StopPipelineExecution API. When you delete a pipeline, all instances of the pipeline are deleted.

", "DeleteProject": "

Deletes the specified project.

", "DeleteSpace": "

Used to delete a space.

", @@ -164,6 +166,7 @@ "DescribeMonitoringSchedule": "

Describes the schedule for a monitoring job.

", "DescribeNotebookInstance": "

Returns information about a notebook instance.

", "DescribeNotebookInstanceLifecycleConfig": "

Returns a description of a notebook instance lifecycle configuration.

For information about notebook instance lifecycle configurations, see Step 2.1: (Optional) Customize a Notebook Instance.

", + "DescribeOptimizationJob": "

Provides the properties of the specified optimization job.

", "DescribePipeline": "

Describes the details of a pipeline.

", "DescribePipelineDefinitionForExecution": "

Describes the details of an execution's pipeline definition.

", "DescribePipelineExecution": "

Describes the details of a pipeline execution.

", @@ -245,6 +248,7 @@ "ListMonitoringSchedules": "

Returns a list of all monitoring schedules.

", "ListNotebookInstanceLifecycleConfigs": "

Lists notebook instance lifecycle configurations created with the CreateNotebookInstanceLifecycleConfig API.

", "ListNotebookInstances": "

Returns a list of the SageMaker notebook instances in the requester's account in an Amazon Web Services Region.

", + "ListOptimizationJobs": "

Lists the optimization jobs in your account and their properties.

", "ListPipelineExecutionSteps": "

Gets a list of PipelineExecutionStep objects.

", "ListPipelineExecutions": "

Gets a list of the pipeline executions.

", "ListPipelineParametersForExecution": "

Gets a list of parameters for a pipeline execution.

", @@ -290,6 +294,7 @@ "StopMlflowTrackingServer": "

Programmatically stop an MLflow Tracking Server.

", "StopMonitoringSchedule": "

Stops a previously started monitoring schedule.

", "StopNotebookInstance": "

Terminates the ML compute instance. Before terminating the instance, SageMaker disconnects the ML storage volume from it. SageMaker preserves the ML storage volume. SageMaker stops charging you for the ML compute instance when you call StopNotebookInstance.

To access data on the ML storage volume for a notebook instance that has been terminated, call the StartNotebookInstance API. StartNotebookInstance launches another ML compute instance, configures it, and attaches the preserved ML storage volume so you can continue your work.

", + "StopOptimizationJob": "

Ends a running inference optimization job.

", "StopPipelineExecution": "

Stops a pipeline execution.

Callback Step

A pipeline execution won't stop while a callback step is running. When you call StopPipelineExecution on a pipeline execution with a running callback step, SageMaker Pipelines sends an additional Amazon SQS message to the specified SQS queue. The body of the SQS message contains a \"Status\" field which is set to \"Stopping\".

You should add logic to your Amazon SQS message consumer to take any needed action (for example, resource cleanup) upon receipt of the message followed by a call to SendPipelineExecutionStepSuccess or SendPipelineExecutionStepFailure.

Only when SageMaker Pipelines receives one of these calls will it stop the pipeline execution.

Lambda Step

A pipeline execution can't be stopped while a lambda step is running because the Lambda function invoked by the lambda step can't be stopped. If you attempt to stop the execution while the Lambda function is running, the pipeline waits for the Lambda function to finish or until the timeout is hit, whichever occurs first, and then stops. If the Lambda function finishes, the pipeline execution status is Stopped. If the timeout is hit, the pipeline execution status is Failed.

", "StopProcessingJob": "

Stops a processing job.

", "StopTrainingJob": "

Stops a training job. To stop a job, SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms might use this 120-second window to save the model artifacts, so the results of the training are not lost.

When it receives a StopTrainingJob request, SageMaker changes the status of the job to Stopping. After SageMaker stops the job, it sets the status to Stopped.

", @@ -436,6 +441,24 @@ "UpdateModelPackageInput$AdditionalInferenceSpecificationsToAdd": "

An array of additional Inference Specification objects to be added to the existing array of additional Inference Specifications. The total number of additional Inference Specifications cannot exceed 15. Each additional Inference Specification specifies artifacts based on this model package that can be used on inference endpoints. Generally used with SageMaker Neo to store the compiled artifacts.

" } }, + "AdditionalModelChannelName": { + "base": null, + "refs": { + "AdditionalModelDataSource$ChannelName": "

A custom name for this AdditionalModelDataSource object.

" + } + }, + "AdditionalModelDataSource": { + "base": "

Data sources that are available to your model in addition to the one that you specify for ModelDataSource when you use the CreateModel action.

", + "refs": { + "AdditionalModelDataSources$member": null + } + }, + "AdditionalModelDataSources": { + "base": null, + "refs": { + "ContainerDefinition$AdditionalModelDataSources": "

Data sources that are available to your model in addition to the one that you specify for ModelDataSource when you use the CreateModel action.

" + } + }, "AdditionalS3DataSource": { "base": "

A data source used for training or inference that is in addition to the input dataset or model data.

", "refs": { @@ -579,6 +602,13 @@ "DescribeAlgorithmOutput$ValidationSpecification": "

Details about configurations for one or more training jobs that SageMaker runs to test the algorithm.

" } }, + "AmazonQSettings": { + "base": "

A collection of settings that configure the Amazon Q experience within the domain.

", + "refs": { + "DomainSettings$AmazonQSettings": "

A collection of settings that configure the Amazon Q experience within the domain. The AuthMode that you use to create the domain must be SSO.

", + "DomainSettingsForUpdate$AmazonQSettings": "

A collection of settings that configure the Amazon Q experience within the domain.

" + } + }, "AnnotationConsolidationConfig": { "base": "

Configures how labels are consolidated across human workers and processes output data.

", "refs": { @@ -3097,6 +3127,16 @@ "refs": { } }, + "CreateOptimizationJobRequest": { + "base": null, + "refs": { + } + }, + "CreateOptimizationJobResponse": { + "base": null, + "refs": { + } + }, "CreatePipelineRequest": { "base": null, "refs": { @@ -3266,6 +3306,7 @@ "DescribeModelPackageOutput$CreationTime": "

A timestamp specifying when the model package was created.

", "DescribeNotebookInstanceLifecycleConfigOutput$CreationTime": "

A timestamp that tells when the lifecycle configuration was created.

", "DescribeNotebookInstanceOutput$CreationTime": "

A timestamp. Use this parameter to return the time when the notebook instance was created.

", + "DescribeOptimizationJobResponse$CreationTime": "

The time when you created the optimization job.

", "DescribeSpaceResponse$CreationTime": "

The creation time.

", "DescribeUserProfileResponse$CreationTime": "

The creation time.

", "DomainDetails$CreationTime": "

The creation time.

", @@ -3290,12 +3331,15 @@ "ListNotebookInstanceLifecycleConfigsInput$CreationTimeAfter": "

A filter that returns only lifecycle configurations that were created after the specified time (timestamp).

", "ListNotebookInstancesInput$CreationTimeBefore": "

A filter that returns only notebook instances that were created before the specified time (timestamp).

", "ListNotebookInstancesInput$CreationTimeAfter": "

A filter that returns only notebook instances that were created after the specified time (timestamp).

", + "ListOptimizationJobsRequest$CreationTimeAfter": "

Filters the results to only those optimization jobs that were created after the specified time.

", + "ListOptimizationJobsRequest$CreationTimeBefore": "

Filters the results to only those optimization jobs that were created before the specified time.

", "ModelPackage$CreationTime": "

The time that the model package was created.

", "ModelPackageGroup$CreationTime": "

The time that the model group was created.

", "ModelPackageGroupSummary$CreationTime": "

The time that the model group was created.

", "ModelPackageSummary$CreationTime": "

A timestamp that shows when the model package was created.

", "NotebookInstanceLifecycleConfigSummary$CreationTime": "

A timestamp that tells when the lifecycle configuration was created.

", "NotebookInstanceSummary$CreationTime": "

A timestamp that shows when the notebook instance was created.

", + "OptimizationJobSummary$CreationTime": "

The time when you created the optimization job.

", "SpaceDetails$CreationTime": "

The creation time.

", "UserProfileDetails$CreationTime": "

The creation time.

" } @@ -3844,6 +3888,11 @@ "refs": { } }, + "DeleteOptimizationJobRequest": { + "base": null, + "refs": { + } + }, "DeletePipelineRequest": { "base": null, "refs": { @@ -4477,6 +4526,16 @@ "refs": { } }, + "DescribeOptimizationJobRequest": { + "base": null, + "refs": { + } + }, + "DescribeOptimizationJobResponse": { + "base": null, + "refs": { + } + }, "DescribePipelineDefinitionForExecutionRequest": { "base": null, "refs": { @@ -5545,6 +5604,7 @@ "CreateModelCardRequest$ModelCardName": "

The unique name of the model card.

", "CreateModelPackageGroupInput$ModelPackageGroupName": "

The name of the model group.

", "CreateModelPackageInput$ModelPackageName": "

The name of the model package. The name must have 1 to 63 characters. Valid characters are a-z, A-Z, 0-9, and - (hyphen).

This parameter is required for unversioned models. It is not applicable to versioned models.

", + "CreateOptimizationJobRequest$OptimizationJobName": "

A custom name for the new optimization job.

", "DeleteAlgorithmInput$AlgorithmName": "

The name of the algorithm to delete.

", "DeleteCodeRepositoryInput$CodeRepositoryName": "

The name of the Git repository to delete.

", "DeleteCompilationJobRequest$CompilationJobName": "

The name of the compilation job to delete.

", @@ -5554,6 +5614,7 @@ "DeleteEdgeDeploymentStageRequest$StageName": "

The name of the stage.

", "DeleteModelCardRequest$ModelCardName": "

The name of the model card to delete.

", "DeleteModelPackageGroupPolicyInput$ModelPackageGroupName": "

The name of the model group for which to delete the policy.

", + "DeleteOptimizationJobRequest$OptimizationJobName": "

The name that you assigned to the optimization job.

", "DeploymentStage$StageName": "

The name of the stage.

", "DeploymentStageStatusSummary$StageName": "

The name of the stage.

", "DeregisterDevicesRequest$DeviceFleetName": "

The name of the fleet the devices belong to.

", @@ -5581,6 +5642,8 @@ "DescribeModelPackageGroupOutput$ModelPackageGroupName": "

The name of the model group.

", "DescribeModelPackageOutput$ModelPackageName": "

The name of the model package being described.

", "DescribeModelPackageOutput$ModelPackageGroupName": "

If the model is a versioned model, the name of the model group that the versioned model belongs to.

", + "DescribeOptimizationJobRequest$OptimizationJobName": "

The name that you assigned to the optimization job.

", + "DescribeOptimizationJobResponse$OptimizationJobName": "

The name that you assigned to the optimization job.

", "DeviceDeploymentSummary$EdgeDeploymentPlanName": "

The name of the edge deployment plan.

", "DeviceDeploymentSummary$StageName": "

The name of the stage in the edge deployment plan.

", "DeviceDeploymentSummary$DeployedStageName": "

The name of the deployed stage.

", @@ -5622,6 +5685,7 @@ "ModelPackageSummary$ModelPackageName": "

The name of the model package.

", "ModelPackageSummary$ModelPackageGroupName": "

If the model package is a versioned model, the model group that the versioned model belongs to.

", "ModelPackageValidationProfile$ProfileName": "

The name of the profile for the model package.

", + "OptimizationJobSummary$OptimizationJobName": "

The name that you assigned to the optimization job.

", "PutModelPackageGroupPolicyInput$ModelPackageGroupName": "

The name of the model group to add a resource policy to.

", "RegisterDevicesRequest$DeviceFleetName": "

The name of the fleet.

", "StartEdgeDeploymentStageRequest$EdgeDeploymentPlanName": "

The name of the edge deployment plan to start.

", @@ -5630,6 +5694,7 @@ "StopEdgeDeploymentStageRequest$EdgeDeploymentPlanName": "

The name of the edge deployment plan to stop.

", "StopEdgeDeploymentStageRequest$StageName": "

The name of the stage to stop.

", "StopEdgePackagingJobRequest$EdgePackagingJobName": "

The name of the edge packaging job.

", + "StopOptimizationJobRequest$OptimizationJobName": "

The name that you assigned to the optimization job.

", "UpdateCodeRepositoryInput$CodeRepositoryName": "

The name of the Git repository to update.

", "UpdateDeviceFleetRequest$DeviceFleetName": "

The name of the fleet.

", "UpdateDevicesRequest$DeviceFleetName": "

The name of the fleet the devices belong to.

" @@ -5922,6 +5987,7 @@ "DescribeModelCardExportJobResponse$FailureReason": "

The failure reason if the model export job fails.

", "DescribeMonitoringScheduleResponse$FailureReason": "

A string, up to one KB in size, that contains the reason a monitoring job failed, if it failed.

", "DescribeNotebookInstanceOutput$FailureReason": "

If status is Failed, the reason it failed.

", + "DescribeOptimizationJobResponse$FailureReason": "

If the optimization job status is FAILED, the reason for the failure.

", "DescribeProcessingJobResponse$FailureReason": "

A string, up to one KB in size, that contains the reason a processing job failed, if it failed.

", "DescribeSpaceResponse$FailureReason": "

The failure reason.

", "DescribeTrainingJobResponse$FailureReason": "

If the training job failed, the reason it failed.

", @@ -6130,6 +6196,7 @@ "FeatureStatus": { "base": null, "refs": { + "AmazonQSettings$Status": "

Whether Amazon Q has been enabled within the domain.

", "DirectDeploySettings$Status": "

Describes whether model deployment permissions are enabled or disabled in the Canvas application.

", "DockerSettings$EnableDockerAccess": "

Indicates whether the domain can access Docker.

", "IdentityProviderOAuthSetting$Status": "

Describes whether OAuth for a data source is enabled or disabled in the Canvas application.

", @@ -8064,6 +8131,7 @@ "MonitoringClusterConfig$VolumeKmsKeyId": "

The Key Management Service (KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the model monitoring job.

", "MonitoringOutputConfig$KmsKeyId": "

The Key Management Service (KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.

", "OnlineStoreSecurityConfig$KmsKeyId": "

The Amazon Web Services Key Management Service (KMS) key ARN that SageMaker Feature Store uses to encrypt the Amazon S3 objects at rest using Amazon S3 server-side encryption.

The caller (either user or IAM role) of CreateFeatureGroup must have the following permissions to the OnlineStore KmsKeyId:

The caller (either user or IAM role) of all DataPlane operations (PutRecord, GetRecord, DeleteRecord) must have the following permissions to the KmsKeyId:

", + "OptimizationJobOutputConfig$KmsKeyId": "

The Amazon Resource Name (ARN) of a key in Amazon Web Services KMS. SageMaker uses the key to encrypt the artifacts of the optimized model when SageMaker uploads the model to Amazon S3.

", "OutputConfig$KmsKeyId": "

The Amazon Web Services Key Management Service key (Amazon Web Services KMS) that Amazon SageMaker uses to encrypt your output models with Amazon S3 server-side encryption after the compilation job. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

The KmsKeyId can be any of the following formats:

", "OutputDataConfig$KmsKeyId": "

The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats:

If you use a KMS key ID or an alias of your KMS key, the SageMaker execution role must include permissions to call kms:Encrypt. If you don't provide a KMS key ID, SageMaker uses the default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide. If the output data is stored in Amazon S3 Express One Zone, it is encrypted with server-side encryption with Amazon S3 managed keys (SSE-S3); KMS keys are not supported for Amazon S3 Express One Zone.

The KMS key policy must grant permission to the IAM role that you specify in your CreateTrainingJob, CreateTransformJob, or CreateHyperParameterTuningJob requests. For more information, see Using Key Policies in Amazon Web Services KMS in the Amazon Web Services Key Management Service Developer Guide.

", "ProcessingClusterConfig$VolumeKmsKeyId": "

The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the processing job.

Certain Nitro-based instances include local storage, dependent on the instance type. Local storage volumes are encrypted using a hardware module on the instance. You can't request a VolumeKmsKeyId when using an instance type with local storage.

For a list of instance types that support local instance storage, see Instance Store Volumes.

For more information about local instance storage encryption, see SSD Instance Store Volumes.

", @@ -8275,6 +8343,7 @@ "DescribeInferenceRecommendationsJobResponse$LastModifiedTime": "

A timestamp that shows when the job was last modified.

", "DescribeNotebookInstanceLifecycleConfigOutput$LastModifiedTime": "

A timestamp that tells when the lifecycle configuration was last modified.

", "DescribeNotebookInstanceOutput$LastModifiedTime": "

A timestamp. Use this parameter to retrieve the time when the notebook instance was last modified.

", + "DescribeOptimizationJobResponse$LastModifiedTime": "

The time when the optimization job was last updated.

", "DescribeSpaceResponse$LastModifiedTime": "

The last modified time.

", "DescribeUserProfileResponse$LastModifiedTime": "

The last modified time.

", "DomainDetails$LastModifiedTime": "

The last modified time.

", @@ -8289,8 +8358,11 @@ "ListNotebookInstanceLifecycleConfigsInput$LastModifiedTimeAfter": "

A filter that returns only lifecycle configurations that were modified after the specified time (timestamp).

", "ListNotebookInstancesInput$LastModifiedTimeBefore": "

A filter that returns only notebook instances that were modified before the specified time (timestamp).

", "ListNotebookInstancesInput$LastModifiedTimeAfter": "

A filter that returns only notebook instances that were modified after the specified time (timestamp).

", + "ListOptimizationJobsRequest$LastModifiedTimeAfter": "

Filters the results to only those optimization jobs that were updated after the specified time.

", + "ListOptimizationJobsRequest$LastModifiedTimeBefore": "

Filters the results to only those optimization jobs that were updated before the specified time.

", "NotebookInstanceLifecycleConfigSummary$LastModifiedTime": "

A timestamp that tells when the lifecycle configuration was last modified.

", "NotebookInstanceSummary$LastModifiedTime": "

A timestamp that shows when the notebook instance was last modified.

", + "OptimizationJobSummary$LastModifiedTime": "

The time when the optimization job was last updated.

", "SpaceDetails$LastModifiedTime": "

The last modified time.

", "UserProfileDetails$LastModifiedTime": "

The last modified time.

" } @@ -8981,6 +9053,22 @@ "refs": { } }, + "ListOptimizationJobsRequest": { + "base": null, + "refs": { + } + }, + "ListOptimizationJobsResponse": { + "base": null, + "refs": { + } + }, + "ListOptimizationJobsSortBy": { + "base": null, + "refs": { + "ListOptimizationJobsRequest$SortBy": "

The field by which to sort the optimization jobs in the response. The default is CreationTime.
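
Taken together with the MaxResults, NextToken, and filter members documented here, a minimal Ruby sketch of listing optimization jobs might look like the following (the region and name filter are placeholders, and the method and attribute names assume the SDK's usual snake_case mapping of these shapes):

```ruby
require 'aws-sdk-sagemaker'

client = Aws::SageMaker::Client.new(region: 'us-east-1') # region is a placeholder

# List optimization jobs, newest first; NameContains narrows by job name.
resp = client.list_optimization_jobs(
  sort_by: 'CreationTime',  # the documented default
  sort_order: 'Descending',
  name_contains: 'llama',   # placeholder filter string
  max_results: 50           # the documented default
)

# ListOptimizationJobs is paginated; each_page follows NextToken for you.
resp.each_page do |page|
  page.optimization_job_summaries.each do |job|
    puts "#{job.optimization_job_name}: #{job.optimization_job_status}"
  end
end
```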

" + } + }, "ListPipelineExecutionStepsRequest": { "base": null, "refs": { @@ -9393,6 +9481,7 @@ "ListMonitoringSchedulesRequest$MaxResults": "

The maximum number of jobs to return in the response. The default value is 10.

", "ListNotebookInstanceLifecycleConfigsInput$MaxResults": "

The maximum number of lifecycle configurations to return in the response.

", "ListNotebookInstancesInput$MaxResults": "

The maximum number of notebook instances to return.

", + "ListOptimizationJobsRequest$MaxResults": "

The maximum number of optimization jobs to return in the response. The default is 50.

", "ListPipelineExecutionStepsRequest$MaxResults": "

The maximum number of pipeline execution steps to return in the response.

", "ListPipelineExecutionsRequest$MaxResults": "

The maximum number of pipeline executions to return in the response.

", "ListPipelineParametersForExecutionRequest$MaxResults": "

The maximum number of parameters to return in the response.

", @@ -9850,6 +9939,12 @@ "TransformJob$ModelClientConfig": null } }, + "ModelCompilationConfig": { + "base": "

Settings for the model compilation technique that's applied by a model optimization job.

", + "refs": { + "OptimizationConfig$ModelCompilationConfig": "

Settings for the model compilation technique that's applied by a model optimization job.

" + } + }, "ModelCompressionType": { "base": null, "refs": { @@ -10305,6 +10400,12 @@ "DescribeModelQualityJobDefinitionResponse$ModelQualityJobInput": "

Inputs for the model quality job.

" } }, + "ModelQuantizationConfig": { + "base": "

Settings for the model quantization technique that's applied by a model optimization job.

", + "refs": { + "OptimizationConfig$ModelQuantizationConfig": "

Settings for the model quantization technique that's applied by a model optimization job.

" + } + }, "ModelRegisterSettings": { "base": "

The model registry settings for the SageMaker Canvas application.

", "refs": { @@ -10877,6 +10978,8 @@ "ListModelPackagesInput$NameContains": "

A string in the model package name. This filter returns only model packages whose name contains the specified string.

", "ListModelQualityJobDefinitionsRequest$NameContains": "

A string in the model quality monitoring job definition name. This filter returns only model quality monitoring job definitions whose name contains the specified string.

", "ListMonitoringSchedulesRequest$NameContains": "

Filter for monitoring schedules whose name contains a specified string.

", + "ListOptimizationJobsRequest$OptimizationContains": "

Filters the results to only those optimization jobs that apply the specified optimization techniques. You can specify either Quantization or Compilation.

", + "ListOptimizationJobsRequest$NameContains": "

Filters the results to only those optimization jobs with a name that contains the specified string.

", "ListTrainingJobsRequest$NameContains": "

A string in the training job name. This filter returns only training jobs whose name contains the specified string.

", "ListTransformJobsRequest$NameContains": "

A string in the transform job name. This filter returns only transform jobs whose name contains the specified string.

" } @@ -11052,6 +11155,8 @@ "ListNotebookInstanceLifecycleConfigsOutput$NextToken": "

If the response is truncated, SageMaker returns this token. To get the next set of lifecycle configurations, use it in the next request.

", "ListNotebookInstancesInput$NextToken": "

If the previous call to ListNotebookInstances was truncated, the response includes a NextToken. You can use this token in your subsequent ListNotebookInstances request to fetch the next set of notebook instances.

You might specify a filter or a sort order in your request. When the response is truncated, you must use the same values for the filter and sort order in the next request.

", "ListNotebookInstancesOutput$NextToken": "

If the response to the previous ListNotebookInstances request was truncated, SageMaker returns this token. To retrieve the next set of notebook instances, use the token in the next request.

", + "ListOptimizationJobsRequest$NextToken": "

A token that you use to get the next set of results following a truncated response. If the response to the previous request was truncated, that response provides the value for this token.

", + "ListOptimizationJobsResponse$NextToken": "

The token to use in a subsequent request to get the next set of results following a truncated response.

", "ListPipelineExecutionStepsRequest$NextToken": "

If the result of the previous ListPipelineExecutionSteps request was truncated, the response includes a NextToken. To retrieve the next set of pipeline execution steps, use the token in the next request.

", "ListPipelineExecutionStepsResponse$NextToken": "

If the result of the previous ListPipelineExecutionSteps request was truncated, the response includes a NextToken. To retrieve the next set of pipeline execution steps, use the token in the next request.

", "ListPipelineExecutionsRequest$NextToken": "

If the result of the previous ListPipelineExecutions request was truncated, the response includes a NextToken. To retrieve the next set of pipeline executions, use the token in the next request.

", @@ -11100,7 +11205,8 @@ "base": null, "refs": { "CustomImageContainerEntrypoint$member": null, - "CustomImageContainerEnvironmentVariables$key": null + "CustomImageContainerEnvironmentVariables$key": null, + "OptimizationJobEnvironmentVariables$key": null } }, "NonEmptyString64": { @@ -11439,6 +11545,153 @@ "Filter$Operator": "

A Boolean binary operator that is used to evaluate the filter. The operator field contains one of the following values:

Equals

The value of Name equals Value.

NotEquals

The value of Name doesn't equal Value.

Exists

The Name property exists.

NotExists

The Name property does not exist.

GreaterThan

The value of Name is greater than Value. Not supported for text properties.

GreaterThanOrEqualTo

The value of Name is greater than or equal to Value. Not supported for text properties.

LessThan

The value of Name is less than Value. Not supported for text properties.

LessThanOrEqualTo

The value of Name is less than or equal to Value. Not supported for text properties.

In

The value of Name is one of the comma delimited strings in Value. Only supported for text properties.

Contains

The value of Name contains the string Value. Only supported for text properties.

A SearchExpression can include the Contains operator multiple times when the value of Name is one of the following:

  • Experiment.DisplayName

  • Experiment.ExperimentName

  • Experiment.Tags

  • Trial.DisplayName

  • Trial.TrialName

  • Trial.Tags

  • TrialComponent.DisplayName

  • TrialComponent.TrialComponentName

  • TrialComponent.Tags

  • TrialComponent.InputArtifacts

  • TrialComponent.OutputArtifacts

A SearchExpression can include only one Contains operator for all other values of Name. In these cases, if you include multiple Contains operators in the SearchExpression, the result is the following error message: \"'CONTAINS' operator usage limit of 1 exceeded.\"
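
As a short sketch of how these operators compose in the Ruby SDK (assuming a SageMaker client as client; the resource type and property name are illustrative):

```ruby
# Search for training jobs whose name contains "bert".
# Contains is only supported for text properties.
resp = client.search(
  resource: 'TrainingJob',
  search_expression: {
    filters: [
      { name: 'TrainingJobName', operator: 'Contains', value: 'bert' }
    ]
  }
)
resp.results.each { |r| puts r.training_job.training_job_name }
```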

" } }, + "OptimizationConfig": { + "base": "

Settings for an optimization technique that you apply with a model optimization job.
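
A minimal sketch of one such config in the Ruby SDK's snake_case form (the ECR image URI and the OPTION_QUANTIZE environment key are placeholders, not documented values):

```ruby
# One OptimizationConfig entry per technique; each entry sets exactly one of
# model_quantization_config or model_compilation_config.
optimization_configs = [
  {
    model_quantization_config: {
      # Optional URI of an LMI DLC in Amazon ECR (placeholder account/tag).
      image: '123456789012.dkr.ecr.us-east-1.amazonaws.com/lmi:latest',
      # Overrides for the container's default environment (illustrative key).
      override_environment: { 'OPTION_QUANTIZE' => 'awq' }
    }
  }
]
```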

", + "refs": { + "OptimizationConfigs$member": null + } + }, + "OptimizationConfigs": { + "base": null, + "refs": { + "CreateOptimizationJobRequest$OptimizationConfigs": "

Settings for each of the optimization techniques that the job applies.

", + "DescribeOptimizationJobResponse$OptimizationConfigs": "

Settings for each of the optimization techniques that the job applies.

" + } + }, + "OptimizationContainerImage": { + "base": null, + "refs": { + "ModelCompilationConfig$Image": "

The URI of an LMI DLC in Amazon ECR. SageMaker uses this image to run the optimization.

", + "ModelQuantizationConfig$Image": "

The URI of an LMI DLC in Amazon ECR. SageMaker uses this image to run the optimization.

", + "OptimizationOutput$RecommendedInferenceImage": "

The image that SageMaker recommends that you use to host the optimized model that you created with an optimization job.

" + } + }, + "OptimizationJobArn": { + "base": null, + "refs": { + "CreateOptimizationJobResponse$OptimizationJobArn": "

The Amazon Resource Name (ARN) of the optimization job.

", + "DescribeOptimizationJobResponse$OptimizationJobArn": "

The Amazon Resource Name (ARN) of the optimization job.

", + "OptimizationJobSummary$OptimizationJobArn": "

The Amazon Resource Name (ARN) of the optimization job.

" + } + }, + "OptimizationJobDeploymentInstanceType": { + "base": null, + "refs": { + "CreateOptimizationJobRequest$DeploymentInstanceType": "

The type of instance that hosts the optimized model that you create with the optimization job.

", + "DescribeOptimizationJobResponse$DeploymentInstanceType": "

The type of instance that hosts the optimized model that you create with the optimization job.

", + "OptimizationJobSummary$DeploymentInstanceType": "

The type of instance that hosts the optimized model that you create with the optimization job.

" + } + }, + "OptimizationJobEnvironmentVariables": { + "base": null, + "refs": { + "CreateOptimizationJobRequest$OptimizationEnvironment": "

The environment variables to set in the model container.

", + "DescribeOptimizationJobResponse$OptimizationEnvironment": "

The environment variables to set in the model container.

", + "ModelCompilationConfig$OverrideEnvironment": "

Environment variables that override the default ones in the model container.

", + "ModelQuantizationConfig$OverrideEnvironment": "

Environment variables that override the default ones in the model container.

" + } + }, + "OptimizationJobModelSource": { + "base": "

The location of the source model to optimize with an optimization job.

", + "refs": { + "CreateOptimizationJobRequest$ModelSource": "

The location of the source model to optimize with an optimization job.

", + "DescribeOptimizationJobResponse$ModelSource": "

The location of the source model to optimize with an optimization job.

" + } + }, + "OptimizationJobModelSourceS3": { + "base": "

The Amazon S3 location of a source model to optimize with an optimization job.

", + "refs": { + "OptimizationJobModelSource$S3": "

The Amazon S3 location of a source model to optimize with an optimization job.

" + } + }, + "OptimizationJobOutputConfig": { + "base": "

Details for where to store the optimized model that you create with the optimization job.

", + "refs": { + "CreateOptimizationJobRequest$OutputConfig": "

Details for where to store the optimized model that you create with the optimization job.

", + "DescribeOptimizationJobResponse$OutputConfig": "

Details for where to store the optimized model that you create with the optimization job.

" + } + }, + "OptimizationJobStatus": { + "base": null, + "refs": { + "DescribeOptimizationJobResponse$OptimizationJobStatus": "

The current status of the optimization job.
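
For example, a caller might poll this status until the job reaches a terminal state (a sketch; the exact set of terminal status values is an assumption):

```ruby
# Poll DescribeOptimizationJob until the job stops making progress.
resp = nil
loop do
  resp = client.describe_optimization_job(optimization_job_name: 'my-opt-job')
  break if %w[COMPLETED FAILED STOPPED].include?(resp.optimization_job_status) # assumed terminal values
  sleep 30
end
puts resp.failure_reason if resp.optimization_job_status == 'FAILED'
```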

", + "ListOptimizationJobsRequest$StatusEquals": "

Filters the results to only those optimization jobs with the specified status.

", + "OptimizationJobSummary$OptimizationJobStatus": "

The current status of the optimization job.

" + } + }, + "OptimizationJobSummaries": { + "base": null, + "refs": { + "ListOptimizationJobsResponse$OptimizationJobSummaries": "

A list of optimization jobs and their properties that match any of the filters you specified in the request.

" + } + }, + "OptimizationJobSummary": { + "base": "

Summarizes an optimization job by providing some of its key properties.

", + "refs": { + "OptimizationJobSummaries$member": null + } + }, + "OptimizationModelAcceptEula": { + "base": null, + "refs": { + "OptimizationModelAccessConfig$AcceptEula": "

Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as True in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
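
A sketch of an OptimizationJobModelSource that opts in to the EULA (the bucket path is a placeholder):

```ruby
# Source model in S3; accept_eula must be explicitly true for gated models.
model_source = {
  s3: {
    s3_uri: 's3://amzn-s3-demo-bucket/models/llama/', # placeholder URI
    model_access_config: { accept_eula: true }
  }
}
```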

" + } + }, + "OptimizationModelAccessConfig": { + "base": "

The access configuration settings for the source ML model for an optimization job, where you can accept the model end-user license agreement (EULA).

", + "refs": { + "OptimizationJobModelSourceS3$ModelAccessConfig": "

The access configuration settings for the source ML model for an optimization job, where you can accept the model end-user license agreement (EULA).

" + } + }, + "OptimizationOutput": { + "base": "

Output values produced by an optimization job.

", + "refs": { + "DescribeOptimizationJobResponse$OptimizationOutput": "

Output values produced by an optimization job.

" + } + }, + "OptimizationType": { + "base": null, + "refs": { + "OptimizationTypes$member": null + } + }, + "OptimizationTypes": { + "base": null, + "refs": { + "OptimizationJobSummary$OptimizationTypes": "

The optimization techniques that are applied by the optimization job.

" + } + }, + "OptimizationVpcConfig": { + "base": "

A VPC in Amazon VPC that's accessible to an optimized model that you create with an optimization job. You can control access to and from your resources by configuring a VPC. For more information, see Give SageMaker Access to Resources in your Amazon VPC.

", + "refs": { + "CreateOptimizationJobRequest$VpcConfig": "

A VPC in Amazon VPC that your optimized model has access to.

", + "DescribeOptimizationJobResponse$VpcConfig": "

A VPC in Amazon VPC that your optimized model has access to.

" + } + }, + "OptimizationVpcSecurityGroupId": { + "base": null, + "refs": { + "OptimizationVpcSecurityGroupIds$member": null + } + }, + "OptimizationVpcSecurityGroupIds": { + "base": null, + "refs": { + "OptimizationVpcConfig$SecurityGroupIds": "

The VPC security group IDs, in the form sg-xxxxxxxx. Specify the security groups for the VPC that is specified in the Subnets field.

" + } + }, + "OptimizationVpcSubnetId": { + "base": null, + "refs": { + "OptimizationVpcSubnets$member": null + } + }, + "OptimizationVpcSubnets": { + "base": null, + "refs": { + "OptimizationVpcConfig$Subnets": "

The IDs of the subnets in the VPC to which you want to connect your optimized model.
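
A sketch of the corresponding request structure (all IDs are placeholders):

```ruby
# Attach the optimized model to a VPC you control.
vpc_config = {
  security_group_ids: ['sg-0123456789abcdef0'],
  subnets: ['subnet-0123456789abcdef0', 'subnet-0fedcba9876543210']
}
```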

" + } + }, "OptionalDouble": { "base": null, "refs": { @@ -12518,6 +12771,12 @@ "refs": { } }, + "QProfileArn": { + "base": null, + "refs": { + "AmazonQSettings$QProfileArn": "

The ARN of the Amazon Q profile used within the domain.

" + } + }, "QualityCheckStepMetadata": { "base": "

Container for the metadata for a Quality check step. For more information, see the topic on QualityCheck step in the Amazon SageMaker Developer Guide.

", "refs": { @@ -13205,6 +13464,7 @@ "CreateModelInput$ExecutionRoleArn": "

The Amazon Resource Name (ARN) of the IAM role that SageMaker can assume to access model artifacts and docker image for deployment on ML compute instances or for batch transform jobs. Deploying on ML compute instances is part of model hosting. For more information, see SageMaker Roles.

To be able to pass this role to SageMaker, the caller of this API must have the iam:PassRole permission.

", "CreateModelQualityJobDefinitionRequest$RoleArn": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.

", "CreateNotebookInstanceInput$RoleArn": "

When you send any requests to Amazon Web Services resources from the notebook instance, SageMaker assumes this role to perform tasks on your behalf. You must grant this role necessary permissions so SageMaker can perform these tasks. The policy must allow the SageMaker service principal (sagemaker.amazonaws.com) permissions to assume this role. For more information, see SageMaker Roles.

To be able to pass this role to SageMaker, the caller of this API must have the iam:PassRole permission.

", + "CreateOptimizationJobRequest$RoleArn": "

The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker to perform tasks on your behalf.

During model optimization, Amazon SageMaker needs your permission to:

You grant permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission. For more information, see Amazon SageMaker Roles.
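
Combining the sketches above, a CreateOptimizationJob request might be assembled like this (role ARN, bucket, job name, and instance type are placeholders; the instance type must be one of the OptimizationJobDeploymentInstanceType enum values):

```ruby
resp = client.create_optimization_job(
  optimization_job_name: 'my-opt-job',                         # placeholder name
  role_arn: 'arn:aws:iam::123456789012:role/OptimizationRole', # placeholder ARN
  model_source: model_source,                 # see the S3 source sketch above
  deployment_instance_type: 'ml.g5.2xlarge',  # placeholder enum value
  optimization_configs: optimization_configs, # see the quantization sketch above
  output_config: { s3_output_location: 's3://amzn-s3-demo-bucket/optimized/' },
  stopping_condition: { max_runtime_in_seconds: 3600 }
)
puts resp.optimization_job_arn
```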

", "CreatePipelineRequest$RoleArn": "

The Amazon Resource Name (ARN) of the role used by the pipeline to access and create resources.

", "CreateProcessingJobRequest$RoleArn": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.

", "CreateTrainingJobRequest$RoleArn": "

The Amazon Resource Name (ARN) of an IAM role that SageMaker can assume to perform tasks on your behalf.

During model training, SageMaker needs your permission to read input data from an S3 bucket, download a Docker image that contains training code, write model artifacts to an S3 bucket, write logs to Amazon CloudWatch Logs, and publish metrics to Amazon CloudWatch. You grant permissions for all of these tasks to an IAM role. For more information, see SageMaker Roles.

To be able to pass this role to SageMaker, the caller of this API must have the iam:PassRole permission.

", @@ -13228,6 +13488,7 @@ "DescribeModelOutput$ExecutionRoleArn": "

The Amazon Resource Name (ARN) of the IAM role that you specified for the model.

", "DescribeModelQualityJobDefinitionResponse$RoleArn": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.

", "DescribeNotebookInstanceOutput$RoleArn": "

The Amazon Resource Name (ARN) of the IAM role associated with the instance.

", + "DescribeOptimizationJobResponse$RoleArn": "

The ARN of the IAM role that you assigned to the optimization job.

", "DescribePipelineResponse$RoleArn": "

The Amazon Resource Name (ARN) that the pipeline uses to execute.

", "DescribeProcessingJobResponse$RoleArn": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.

", "DescribeTrainingJobResponse$RoleArn": "

The Amazon Web Services Identity and Access Management (IAM) role configured for the training job.

", @@ -13321,6 +13582,7 @@ "S3ModelDataSource": { "base": "

Specifies the S3 location of ML model data to deploy.

", "refs": { + "AdditionalModelDataSource$S3DataSource": null, "ModelDataSource$S3DataSource": "

Specifies the S3 location of ML model data to deploy.

" } }, @@ -13395,6 +13657,8 @@ "MonitoringAppSpecification$PostAnalyticsProcessorSourceUri": "

An Amazon S3 URI to a script that is called after analysis has been performed. Applicable only for the built-in (first party) containers.

", "MonitoringConstraintsResource$S3Uri": "

The Amazon S3 URI for the constraints resource.

", "MonitoringStatisticsResource$S3Uri": "

The Amazon S3 URI for the statistics resource.

", + "OptimizationJobModelSourceS3$S3Uri": "

An Amazon S3 URI that locates a source model to optimize with an optimization job.

", + "OptimizationJobOutputConfig$S3OutputLocation": "

The Amazon S3 URI for where to store the optimized model that you create with an optimization job.

", "OutputConfig$S3OutputLocation": "

Identifies the S3 bucket where you want Amazon SageMaker to store the model artifacts. For example, s3://bucket-name/key-name-prefix.

", "OutputDataConfig$S3OutputPath": "

Identifies the S3 path where you want SageMaker to store the model artifacts. For example, s3://bucket-name/key-name-prefix.

", "ProcessingS3Input$S3Uri": "

The URI of the Amazon S3 prefix Amazon SageMaker downloads data required to run a processing job.

", @@ -13883,6 +14147,7 @@ "ListMonitoringAlertHistoryRequest$SortOrder": "

The sort order, whether Ascending or Descending, of the alert history. The default is Descending.

", "ListMonitoringExecutionsRequest$SortOrder": "

Whether to sort the results in Ascending or Descending order. The default is Descending.

", "ListMonitoringSchedulesRequest$SortOrder": "

Whether to sort the results in Ascending or Descending order. The default is Descending.

", + "ListOptimizationJobsRequest$SortOrder": "

The sort order for results. The default is Ascending.

", "ListPipelineExecutionStepsRequest$SortOrder": "

The field by which to sort results. The default is CreatedTime.

", "ListPipelineExecutionsRequest$SortOrder": "

The sort order for results.

", "ListPipelinesRequest$SortOrder": "

The sort order for results.

", @@ -14259,6 +14524,11 @@ "refs": { } }, + "StopOptimizationJobRequest": { + "base": null, + "refs": { + } + }, "StopPipelineExecutionRequest": { "base": null, "refs": { @@ -14285,11 +14555,13 @@ } }, "StoppingCondition": { - "base": "

Specifies a limit to how long a model training job or model compilation job can run. It also specifies how long a managed spot training job has to complete. When the job reaches the time limit, SageMaker ends the training or compilation job. Use this API to cap model training costs.

To stop a training job, SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.

The training algorithms provided by SageMaker automatically save the intermediate results of a model training job when possible. This attempt to save artifacts is only a best effort case as the model might not be in a state from which it can be saved. For example, if training has just started, the model might not be ready to save. When saved, this intermediate data is a valid model artifact. You can use it to create a model with CreateModel.

The Neural Topic Model (NTM) currently does not support saving intermediate model artifacts. When training NTMs, make sure that the maximum runtime is sufficient for the training job to complete.

", + "base": "

Specifies a limit to how long a job can run. When the job reaches the time limit, SageMaker ends the job. Use this API to cap costs.

To stop a training job, SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.

The training algorithms provided by SageMaker automatically save the intermediate results of a model training job when possible. This attempt to save artifacts is only a best effort case as the model might not be in a state from which it can be saved. For example, if training has just started, the model might not be ready to save. When saved, this intermediate data is a valid model artifact. You can use it to create a model with CreateModel.

The Neural Topic Model (NTM) currently does not support saving intermediate model artifacts. When training NTMs, make sure that the maximum runtime is sufficient for the training job to complete.
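
As a sketch, capping a training job's runtime (and, for managed spot training, its total wait time) might look like this in the Ruby SDK:

```ruby
# Cap runtime at one hour; max_wait_time_in_seconds additionally bounds a
# managed spot training job (runtime plus time spent waiting for capacity).
stopping_condition = {
  max_runtime_in_seconds: 3600,
  max_wait_time_in_seconds: 7200 # only valid when managed spot training is enabled
}
```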

", "refs": { "CreateCompilationJobRequest$StoppingCondition": "

Specifies a limit to how long a model compilation job can run. When the job reaches the time limit, Amazon SageMaker ends the compilation job. Use this API to cap model training costs.

", + "CreateOptimizationJobRequest$StoppingCondition": null, "CreateTrainingJobRequest$StoppingCondition": "

Specifies a limit to how long a model training job can run. It also specifies how long a managed Spot training job has to complete. When the job reaches the time limit, SageMaker ends the training job. Use this API to cap model training costs.

To stop a job, SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.

", "DescribeCompilationJobResponse$StoppingCondition": "

Specifies a limit to how long a model compilation job can run. When the job reaches the time limit, Amazon SageMaker ends the compilation job. Use this API to cap model training costs.

", + "DescribeOptimizationJobResponse$StoppingCondition": null, "DescribeTrainingJobResponse$StoppingCondition": "

Specifies a limit to how long a model training job can run. It also specifies how long a managed Spot training job has to complete. When the job reaches the time limit, SageMaker ends the training job. Use this API to cap model training costs.

To stop a job, SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.

", "HyperParameterTrainingJobDefinition$StoppingCondition": "

Specifies a limit to how long a model hyperparameter training job can run. It also specifies how long a managed spot training job has to complete. When the job reaches the time limit, SageMaker ends the training job. Use this API to cap model training costs.

", "TrainingJob$StoppingCondition": "

Specifies a limit to how long a model training job can run. It also specifies how long a managed Spot training job has to complete. When the job reaches the time limit, SageMaker ends the training job. Use this API to cap model training costs.

To stop a job, SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.

", @@ -14461,6 +14733,7 @@ "ListTrialComponentsRequest$SourceArn": "

A filter that returns only components that have the specified source Amazon Resource Name (ARN). If you specify SourceArn, you can't filter by ExperimentName or TrialName.

", "ModelMetadataFilter$Value": "

The value to filter the model metadata.

", "ModelStepMetadata$Arn": "

The Amazon Resource Name (ARN) of the created model.

", + "OptimizationJobEnvironmentVariables$value": null, "OutputParameter$Name": "

The name of the output parameter.

", "QualityCheckStepMetadata$CheckType": "

The type of the Quality check step.

", "QualityCheckStepMetadata$ModelPackageGroupName": "

The model package group name.

", @@ -14708,6 +14981,7 @@ "CreateModelQualityJobDefinitionRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", "CreateMonitoringScheduleRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", "CreateNotebookInstanceInput$Tags": "

An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources.

", + "CreateOptimizationJobRequest$Tags": "

A list of key-value pairs associated with the optimization job. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide.

", "CreatePipelineRequest$Tags": "

A list of tags to apply to the created pipeline.

", "CreateProcessingJobRequest$Tags": "

(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

", "CreateProjectInput$Tags": "

An array of key-value pairs that you want to use to organize and track your Amazon Web Services resource costs. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide.

", @@ -15097,6 +15371,8 @@ "DescribeModelQualityJobDefinitionResponse$CreationTime": "

The time at which the model quality job was created.

", "DescribeMonitoringScheduleResponse$CreationTime": "

The time at which the monitoring job was created.

", "DescribeMonitoringScheduleResponse$LastModifiedTime": "

The time at which the monitoring job was last modified.

", + "DescribeOptimizationJobResponse$OptimizationStartTime": "

The time when the optimization job started.

", + "DescribeOptimizationJobResponse$OptimizationEndTime": "

The time when the optimization job finished processing.

", "DescribePipelineDefinitionForExecutionResponse$CreationTime": "

The time when the pipeline was created.

", "DescribePipelineExecutionResponse$CreationTime": "

The time when the pipeline execution was created.

", "DescribePipelineExecutionResponse$LastModifiedTime": "

The time when the pipeline execution was modified last.

", @@ -15350,6 +15626,8 @@ "MonitoringSchedule$LastModifiedTime": "

The last time the monitoring schedule was changed.

", "MonitoringScheduleSummary$CreationTime": "

The creation time of the monitoring schedule.

", "MonitoringScheduleSummary$LastModifiedTime": "

The last time the monitoring schedule was modified.

", + "OptimizationJobSummary$OptimizationStartTime": "

The time when the optimization job started.

", + "OptimizationJobSummary$OptimizationEndTime": "

The time when the optimization job finished processing.

", "PendingDeploymentSummary$StartTime": "

The start time of the deployment.

", "Pipeline$CreationTime": "

The creation time of the pipeline.

", "Pipeline$LastModifiedTime": "

The time that the pipeline was last modified.

", diff --git a/apis/sagemaker/2017-07-24/paginators-1.json b/apis/sagemaker/2017-07-24/paginators-1.json index 89823b522f3..7c20db79961 100644 --- a/apis/sagemaker/2017-07-24/paginators-1.json +++ b/apis/sagemaker/2017-07-24/paginators-1.json @@ -318,6 +318,12 @@ "limit_key": "MaxResults", "result_key": "NotebookInstances" }, + "ListOptimizationJobs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "OptimizationJobSummaries" + }, "ListPipelineExecutionSteps": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/gems/aws-partitions/CHANGELOG.md b/gems/aws-partitions/CHANGELOG.md index 2cf4907d415..2196a416fc5 100644 --- a/gems/aws-partitions/CHANGELOG.md +++ b/gems/aws-partitions/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.954.0 (2024-07-09) +------------------ + +* Feature - Updated the partitions source data the determines the AWS service regions and endpoints. + 1.953.0 (2024-07-08) ------------------ diff --git a/gems/aws-partitions/VERSION b/gems/aws-partitions/VERSION index 4ec399a03c7..602f6467ce2 100644 --- a/gems/aws-partitions/VERSION +++ b/gems/aws-partitions/VERSION @@ -1 +1 @@ -1.953.0 +1.954.0 diff --git a/gems/aws-partitions/partitions.json b/gems/aws-partitions/partitions.json index 312484c9c10..56460f6da0f 100644 --- a/gems/aws-partitions/partitions.json +++ b/gems/aws-partitions/partitions.json @@ -28523,31 +28523,17 @@ }, "redshift" : { "endpoints" : { - "fips-us-iso-east-1" : { + "us-iso-east-1" : { "credentialScope" : { "region" : "us-iso-east-1" }, - "deprecated" : true, - "hostname" : "redshift-fips.us-iso-east-1.c2s.ic.gov" + "hostname" : "redshift.us-iso-east-1.c2s.ic.gov" }, - "fips-us-iso-west-1" : { + "us-iso-west-1" : { "credentialScope" : { "region" : "us-iso-west-1" }, - "deprecated" : true, - "hostname" : "redshift-fips.us-iso-west-1.c2s.ic.gov" - }, - "us-iso-east-1" : { - "variants" : [ { - "hostname" : "redshift-fips.us-iso-east-1.c2s.ic.gov", - "tags" : [ "fips" ] - } ] - }, - "us-iso-west-1" : { - "variants" : [ { - "hostname" : "redshift-fips.us-iso-west-1.c2s.ic.gov", - "tags" : [ "fips" ] - } ] + "hostname" : "redshift.us-iso-west-1.c2s.ic.gov" } } }, @@ -29211,18 +29197,11 @@ }, "redshift" : { "endpoints" : { - "fips-us-isob-east-1" : { + "us-isob-east-1" : { "credentialScope" : { "region" : "us-isob-east-1" }, - "deprecated" : true, - "hostname" : "redshift-fips.us-isob-east-1.sc2s.sgov.gov" - }, - "us-isob-east-1" : { - "variants" : [ { - "hostname" : "redshift-fips.us-isob-east-1.sc2s.sgov.gov", - "tags" : [ "fips" ] - } ] + "hostname" : "redshift.us-isob-east-1.sc2s.sgov.gov" } } }, diff --git a/gems/aws-sdk-datazone/CHANGELOG.md b/gems/aws-sdk-datazone/CHANGELOG.md index ced6131c6ad..678ef0ea833 100644 --- a/gems/aws-sdk-datazone/CHANGELOG.md +++ b/gems/aws-sdk-datazone/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.14.0 (2024-07-09) +------------------ + +* Feature - This release deprecates dataProductItem field from SearchInventoryResultItem, along with some unused DataProduct shapes + 1.13.0 (2024-07-02) ------------------ diff --git a/gems/aws-sdk-datazone/VERSION b/gems/aws-sdk-datazone/VERSION index feaae22bac7..850e742404b 100644 --- a/gems/aws-sdk-datazone/VERSION +++ b/gems/aws-sdk-datazone/VERSION @@ -1 +1 @@ -1.13.0 +1.14.0 diff --git a/gems/aws-sdk-datazone/lib/aws-sdk-datazone.rb b/gems/aws-sdk-datazone/lib/aws-sdk-datazone.rb index 03292514661..ccd695d7288 100644 --- 
a/gems/aws-sdk-datazone/lib/aws-sdk-datazone.rb +++ b/gems/aws-sdk-datazone/lib/aws-sdk-datazone.rb @@ -52,6 +52,6 @@ # @!group service module Aws::DataZone - GEM_VERSION = '1.13.0' + GEM_VERSION = '1.14.0' end diff --git a/gems/aws-sdk-datazone/lib/aws-sdk-datazone/client.rb b/gems/aws-sdk-datazone/lib/aws-sdk-datazone/client.rb index 4307e116e9f..e6716b77c64 100644 --- a/gems/aws-sdk-datazone/lib/aws-sdk-datazone/client.rb +++ b/gems/aws-sdk-datazone/lib/aws-sdk-datazone/client.rb @@ -8461,7 +8461,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-datazone' - context[:gem_version] = '1.13.0' + context[:gem_version] = '1.14.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-datazone/lib/aws-sdk-datazone/client_api.rb b/gems/aws-sdk-datazone/lib/aws-sdk-datazone/client_api.rb index 87ca060221a..a94133d631e 100644 --- a/gems/aws-sdk-datazone/lib/aws-sdk-datazone/client_api.rb +++ b/gems/aws-sdk-datazone/lib/aws-sdk-datazone/client_api.rb @@ -1246,11 +1246,11 @@ module ClientApi DataProductItem.add_member(:item_id, Shapes::ShapeRef.new(shape: DataProductId, location_name: "itemId")) DataProductItem.struct_class = Types::DataProductItem - DataProductItems.member = Shapes::ShapeRef.new(shape: DataProductItem) + DataProductItems.member = Shapes::ShapeRef.new(shape: DataProductItem, deprecated: true) DataProductSummary.add_member(:created_at, Shapes::ShapeRef.new(shape: CreatedAt, location_name: "createdAt")) DataProductSummary.add_member(:created_by, Shapes::ShapeRef.new(shape: CreatedBy, location_name: "createdBy")) - DataProductSummary.add_member(:data_product_items, Shapes::ShapeRef.new(shape: DataProductItems, location_name: "dataProductItems")) + DataProductSummary.add_member(:data_product_items, Shapes::ShapeRef.new(shape: DataProductItems, deprecated: true, location_name: "dataProductItems")) DataProductSummary.add_member(:description, Shapes::ShapeRef.new(shape: DataProductDescription, location_name: "description")) DataProductSummary.add_member(:domain_id, Shapes::ShapeRef.new(shape: DomainId, required: true, location_name: "domainId")) DataProductSummary.add_member(:glossary_terms, Shapes::ShapeRef.new(shape: GlossaryTerms, location_name: "glossaryTerms")) @@ -2786,7 +2786,7 @@ module ClientApi SearchInput.struct_class = Types::SearchInput SearchInventoryResultItem.add_member(:asset_item, Shapes::ShapeRef.new(shape: AssetItem, location_name: "assetItem")) - SearchInventoryResultItem.add_member(:data_product_item, Shapes::ShapeRef.new(shape: DataProductSummary, location_name: "dataProductItem")) + SearchInventoryResultItem.add_member(:data_product_item, Shapes::ShapeRef.new(shape: DataProductSummary, deprecated: true, location_name: "dataProductItem", metadata: {"deprecatedMessage"=>"This field is deprecated."})) SearchInventoryResultItem.add_member(:glossary_item, Shapes::ShapeRef.new(shape: GlossaryItem, location_name: "glossaryItem")) SearchInventoryResultItem.add_member(:glossary_term_item, Shapes::ShapeRef.new(shape: GlossaryTermItem, location_name: "glossaryTermItem")) SearchInventoryResultItem.add_member(:unknown, Shapes::ShapeRef.new(shape: nil, location_name: 'unknown')) diff --git a/gems/aws-sdk-fsx/CHANGELOG.md b/gems/aws-sdk-fsx/CHANGELOG.md index 26e829a0586..228a5f92765 100644 --- a/gems/aws-sdk-fsx/CHANGELOG.md +++ b/gems/aws-sdk-fsx/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.94.0 (2024-07-09) +------------------ + +* Feature - Adds support for FSx 
for NetApp ONTAP 2nd Generation file systems, and FSx for OpenZFS Single AZ HA file systems. + 1.93.0 (2024-07-02) ------------------ diff --git a/gems/aws-sdk-fsx/VERSION b/gems/aws-sdk-fsx/VERSION index 95784efddbc..8db4a57b3d0 100644 --- a/gems/aws-sdk-fsx/VERSION +++ b/gems/aws-sdk-fsx/VERSION @@ -1 +1 @@ -1.93.0 +1.94.0 diff --git a/gems/aws-sdk-fsx/lib/aws-sdk-fsx.rb b/gems/aws-sdk-fsx/lib/aws-sdk-fsx.rb index 924e057d83a..8586c1fcd23 100644 --- a/gems/aws-sdk-fsx/lib/aws-sdk-fsx.rb +++ b/gems/aws-sdk-fsx/lib/aws-sdk-fsx.rb @@ -52,6 +52,6 @@ # @!group service module Aws::FSx - GEM_VERSION = '1.93.0' + GEM_VERSION = '1.94.0' end diff --git a/gems/aws-sdk-fsx/lib/aws-sdk-fsx/client.rb b/gems/aws-sdk-fsx/lib/aws-sdk-fsx/client.rb index b3caa963824..18dd83e91f2 100644 --- a/gems/aws-sdk-fsx/lib/aws-sdk-fsx/client.rb +++ b/gems/aws-sdk-fsx/lib/aws-sdk-fsx/client.rb @@ -794,10 +794,10 @@ def cancel_data_repository_task(params = {}, options = {}) # resp.backup.file_system.lustre_configuration.metadata_configuration.iops #=> Integer # resp.backup.file_system.lustre_configuration.metadata_configuration.mode #=> String, one of "AUTOMATIC", "USER_PROVISIONED" # resp.backup.file_system.administrative_actions #=> Array - # resp.backup.file_system.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT" + # resp.backup.file_system.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT", "DOWNLOAD_DATA_FROM_BACKUP" # resp.backup.file_system.administrative_actions[0].progress_percent #=> Integer # resp.backup.file_system.administrative_actions[0].request_time #=> Time - # resp.backup.file_system.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING" + # resp.backup.file_system.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING", "OPTIMIZING" # resp.backup.file_system.administrative_actions[0].target_file_system_values #=> Types::FileSystem # resp.backup.file_system.administrative_actions[0].failure_details.message #=> String # resp.backup.file_system.administrative_actions[0].target_volume_values.creation_time #=> Time @@ -883,7 +883,7 @@ def cancel_data_repository_task(params = {}, options = {}) # resp.backup.file_system.administrative_actions[0].remaining_transfer_bytes #=> Integer # resp.backup.file_system.ontap_configuration.automatic_backup_retention_days #=> Integer # resp.backup.file_system.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.backup.file_system.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2" + # resp.backup.file_system.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2", 
"MULTI_AZ_2" # resp.backup.file_system.ontap_configuration.endpoint_ip_address_range #=> String # resp.backup.file_system.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.backup.file_system.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -906,7 +906,7 @@ def cancel_data_repository_task(params = {}, options = {}) # resp.backup.file_system.open_zfs_configuration.copy_tags_to_backups #=> Boolean # resp.backup.file_system.open_zfs_configuration.copy_tags_to_volumes #=> Boolean # resp.backup.file_system.open_zfs_configuration.daily_automatic_backup_start_time #=> String - # resp.backup.file_system.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_1" + # resp.backup.file_system.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "SINGLE_AZ_HA_1", "SINGLE_AZ_HA_2", "MULTI_AZ_1" # resp.backup.file_system.open_zfs_configuration.throughput_capacity #=> Integer # resp.backup.file_system.open_zfs_configuration.weekly_maintenance_start_time #=> String # resp.backup.file_system.open_zfs_configuration.disk_iops_configuration.mode #=> String, one of "AUTOMATIC", "USER_PROVISIONED" @@ -966,10 +966,10 @@ def cancel_data_repository_task(params = {}, options = {}) # resp.backup.volume.volume_type #=> String, one of "ONTAP", "OPENZFS" # resp.backup.volume.lifecycle_transition_reason.message #=> String # resp.backup.volume.administrative_actions #=> Array - # resp.backup.volume.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT" + # resp.backup.volume.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT", "DOWNLOAD_DATA_FROM_BACKUP" # resp.backup.volume.administrative_actions[0].progress_percent #=> Integer # resp.backup.volume.administrative_actions[0].request_time #=> Time - # resp.backup.volume.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING" + # resp.backup.volume.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING", "OPTIMIZING" # resp.backup.volume.administrative_actions[0].target_file_system_values.owner_id #=> String # resp.backup.volume.administrative_actions[0].target_file_system_values.creation_time #=> Time # resp.backup.volume.administrative_actions[0].target_file_system_values.file_system_id #=> String @@ -1040,7 +1040,7 @@ def cancel_data_repository_task(params = {}, options = {}) # resp.backup.volume.administrative_actions[0].target_file_system_values.administrative_actions #=> Types::AdministrativeActions # resp.backup.volume.administrative_actions[0].target_file_system_values.ontap_configuration.automatic_backup_retention_days #=> Integer # 
resp.backup.volume.administrative_actions[0].target_file_system_values.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.backup.volume.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2" + # resp.backup.volume.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_2" # resp.backup.volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoint_ip_address_range #=> String # resp.backup.volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.backup.volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -1063,7 +1063,7 @@ def cancel_data_repository_task(params = {}, options = {}) # resp.backup.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.copy_tags_to_backups #=> Boolean # resp.backup.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.copy_tags_to_volumes #=> Boolean # resp.backup.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.daily_automatic_backup_start_time #=> String - # resp.backup.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_1" + # resp.backup.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "SINGLE_AZ_HA_1", "SINGLE_AZ_HA_2", "MULTI_AZ_1" # resp.backup.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.throughput_capacity #=> Integer # resp.backup.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.weekly_maintenance_start_time #=> String # resp.backup.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.disk_iops_configuration.mode #=> String, one of "AUTOMATIC", "USER_PROVISIONED" @@ -1210,10 +1210,10 @@ def copy_backup(params = {}, options = {}) # resp.volume_id #=> String # resp.lifecycle #=> String, one of "CREATING", "CREATED", "DELETING", "FAILED", "MISCONFIGURED", "PENDING", "AVAILABLE" # resp.administrative_actions #=> Array - # resp.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT" + # resp.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT", "DOWNLOAD_DATA_FROM_BACKUP" # resp.administrative_actions[0].progress_percent #=> Integer # resp.administrative_actions[0].request_time #=> Time - # resp.administrative_actions[0].status #=> 
String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING" + # resp.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING", "OPTIMIZING" # resp.administrative_actions[0].target_file_system_values.owner_id #=> String # resp.administrative_actions[0].target_file_system_values.creation_time #=> Time # resp.administrative_actions[0].target_file_system_values.file_system_id #=> String @@ -1284,7 +1284,7 @@ def copy_backup(params = {}, options = {}) # resp.administrative_actions[0].target_file_system_values.administrative_actions #=> Types::AdministrativeActions # resp.administrative_actions[0].target_file_system_values.ontap_configuration.automatic_backup_retention_days #=> Integer # resp.administrative_actions[0].target_file_system_values.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2" + # resp.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_2" # resp.administrative_actions[0].target_file_system_values.ontap_configuration.endpoint_ip_address_range #=> String # resp.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -1307,7 +1307,7 @@ def copy_backup(params = {}, options = {}) # resp.administrative_actions[0].target_file_system_values.open_zfs_configuration.copy_tags_to_backups #=> Boolean # resp.administrative_actions[0].target_file_system_values.open_zfs_configuration.copy_tags_to_volumes #=> Boolean # resp.administrative_actions[0].target_file_system_values.open_zfs_configuration.daily_automatic_backup_start_time #=> String - # resp.administrative_actions[0].target_file_system_values.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_1" + # resp.administrative_actions[0].target_file_system_values.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "SINGLE_AZ_HA_1", "SINGLE_AZ_HA_2", "MULTI_AZ_1" # resp.administrative_actions[0].target_file_system_values.open_zfs_configuration.throughput_capacity #=> Integer # resp.administrative_actions[0].target_file_system_values.open_zfs_configuration.weekly_maintenance_start_time #=> String # resp.administrative_actions[0].target_file_system_values.open_zfs_configuration.disk_iops_configuration.mode #=> String, one of "AUTOMATIC", "USER_PROVISIONED" @@ -1635,10 +1635,10 @@ def copy_snapshot_and_update_volume(params = {}, options = {}) # resp.backup.file_system.lustre_configuration.metadata_configuration.iops #=> Integer # resp.backup.file_system.lustre_configuration.metadata_configuration.mode #=> String, one of "AUTOMATIC", "USER_PROVISIONED" # resp.backup.file_system.administrative_actions #=> Array - # resp.backup.file_system.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", 
"VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT" + # resp.backup.file_system.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT", "DOWNLOAD_DATA_FROM_BACKUP" # resp.backup.file_system.administrative_actions[0].progress_percent #=> Integer # resp.backup.file_system.administrative_actions[0].request_time #=> Time - # resp.backup.file_system.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING" + # resp.backup.file_system.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING", "OPTIMIZING" # resp.backup.file_system.administrative_actions[0].target_file_system_values #=> Types::FileSystem # resp.backup.file_system.administrative_actions[0].failure_details.message #=> String # resp.backup.file_system.administrative_actions[0].target_volume_values.creation_time #=> Time @@ -1724,7 +1724,7 @@ def copy_snapshot_and_update_volume(params = {}, options = {}) # resp.backup.file_system.administrative_actions[0].remaining_transfer_bytes #=> Integer # resp.backup.file_system.ontap_configuration.automatic_backup_retention_days #=> Integer # resp.backup.file_system.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.backup.file_system.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2" + # resp.backup.file_system.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_2" # resp.backup.file_system.ontap_configuration.endpoint_ip_address_range #=> String # resp.backup.file_system.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.backup.file_system.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -1747,7 +1747,7 @@ def copy_snapshot_and_update_volume(params = {}, options = {}) # resp.backup.file_system.open_zfs_configuration.copy_tags_to_backups #=> Boolean # resp.backup.file_system.open_zfs_configuration.copy_tags_to_volumes #=> Boolean # resp.backup.file_system.open_zfs_configuration.daily_automatic_backup_start_time #=> String - # resp.backup.file_system.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_1" + # resp.backup.file_system.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "SINGLE_AZ_HA_1", "SINGLE_AZ_HA_2", "MULTI_AZ_1" # resp.backup.file_system.open_zfs_configuration.throughput_capacity #=> Integer # resp.backup.file_system.open_zfs_configuration.weekly_maintenance_start_time #=> String # resp.backup.file_system.open_zfs_configuration.disk_iops_configuration.mode #=> String, one of "AUTOMATIC", "USER_PROVISIONED" @@ -1807,10 +1807,10 @@ def copy_snapshot_and_update_volume(params = {}, options = {}) # resp.backup.volume.volume_type #=> String, one of "ONTAP", "OPENZFS" # resp.backup.volume.lifecycle_transition_reason.message #=> String # resp.backup.volume.administrative_actions #=> Array - # resp.backup.volume.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", 
"STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT" + # resp.backup.volume.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT", "DOWNLOAD_DATA_FROM_BACKUP" # resp.backup.volume.administrative_actions[0].progress_percent #=> Integer # resp.backup.volume.administrative_actions[0].request_time #=> Time - # resp.backup.volume.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING" + # resp.backup.volume.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING", "OPTIMIZING" # resp.backup.volume.administrative_actions[0].target_file_system_values.owner_id #=> String # resp.backup.volume.administrative_actions[0].target_file_system_values.creation_time #=> Time # resp.backup.volume.administrative_actions[0].target_file_system_values.file_system_id #=> String @@ -1881,7 +1881,7 @@ def copy_snapshot_and_update_volume(params = {}, options = {}) # resp.backup.volume.administrative_actions[0].target_file_system_values.administrative_actions #=> Types::AdministrativeActions # resp.backup.volume.administrative_actions[0].target_file_system_values.ontap_configuration.automatic_backup_retention_days #=> Integer # resp.backup.volume.administrative_actions[0].target_file_system_values.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.backup.volume.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2" + # resp.backup.volume.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_2" # resp.backup.volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoint_ip_address_range #=> String # resp.backup.volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.backup.volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -1904,7 +1904,7 @@ def copy_snapshot_and_update_volume(params = {}, options = {}) # resp.backup.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.copy_tags_to_backups #=> Boolean # resp.backup.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.copy_tags_to_volumes #=> Boolean # resp.backup.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.daily_automatic_backup_start_time #=> String - # resp.backup.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_1" + # 
resp.backup.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "SINGLE_AZ_HA_1", "SINGLE_AZ_HA_2", "MULTI_AZ_1" # resp.backup.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.throughput_capacity #=> Integer # resp.backup.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.weekly_maintenance_start_time #=> String # resp.backup.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.disk_iops_configuration.mode #=> String, one of "AUTOMATIC", "USER_PROVISIONED" @@ -2864,7 +2864,7 @@ def create_file_cache(params = {}, options = {}) # ontap_configuration: { # automatic_backup_retention_days: 1, # daily_automatic_backup_start_time: "DailyTime", - # deployment_type: "MULTI_AZ_1", # required, accepts MULTI_AZ_1, SINGLE_AZ_1, SINGLE_AZ_2 + # deployment_type: "MULTI_AZ_1", # required, accepts MULTI_AZ_1, SINGLE_AZ_1, SINGLE_AZ_2, MULTI_AZ_2 # endpoint_ip_address_range: "IpAddressRange", # fsx_admin_password: "AdminPassword", # disk_iops_configuration: { @@ -2884,7 +2884,7 @@ def create_file_cache(params = {}, options = {}) # copy_tags_to_backups: false, # copy_tags_to_volumes: false, # daily_automatic_backup_start_time: "DailyTime", - # deployment_type: "SINGLE_AZ_1", # required, accepts SINGLE_AZ_1, SINGLE_AZ_2, MULTI_AZ_1 + # deployment_type: "SINGLE_AZ_1", # required, accepts SINGLE_AZ_1, SINGLE_AZ_2, SINGLE_AZ_HA_1, SINGLE_AZ_HA_2, MULTI_AZ_1 # throughput_capacity: 1, # required # weekly_maintenance_start_time: "WeeklyTime", # disk_iops_configuration: { @@ -2990,10 +2990,10 @@ def create_file_cache(params = {}, options = {}) # resp.file_system.lustre_configuration.metadata_configuration.iops #=> Integer # resp.file_system.lustre_configuration.metadata_configuration.mode #=> String, one of "AUTOMATIC", "USER_PROVISIONED" # resp.file_system.administrative_actions #=> Array - # resp.file_system.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT" + # resp.file_system.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT", "DOWNLOAD_DATA_FROM_BACKUP" # resp.file_system.administrative_actions[0].progress_percent #=> Integer # resp.file_system.administrative_actions[0].request_time #=> Time - # resp.file_system.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING" + # resp.file_system.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING", "OPTIMIZING" # resp.file_system.administrative_actions[0].target_file_system_values #=> Types::FileSystem # resp.file_system.administrative_actions[0].failure_details.message #=> String # 
resp.file_system.administrative_actions[0].target_volume_values.creation_time #=> Time @@ -3079,7 +3079,7 @@ def create_file_cache(params = {}, options = {}) # resp.file_system.administrative_actions[0].remaining_transfer_bytes #=> Integer # resp.file_system.ontap_configuration.automatic_backup_retention_days #=> Integer # resp.file_system.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.file_system.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2" + # resp.file_system.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_2" # resp.file_system.ontap_configuration.endpoint_ip_address_range #=> String # resp.file_system.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.file_system.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -3102,7 +3102,7 @@ def create_file_cache(params = {}, options = {}) # resp.file_system.open_zfs_configuration.copy_tags_to_backups #=> Boolean # resp.file_system.open_zfs_configuration.copy_tags_to_volumes #=> Boolean # resp.file_system.open_zfs_configuration.daily_automatic_backup_start_time #=> String - # resp.file_system.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_1" + # resp.file_system.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "SINGLE_AZ_HA_1", "SINGLE_AZ_HA_2", "MULTI_AZ_1" # resp.file_system.open_zfs_configuration.throughput_capacity #=> Integer # resp.file_system.open_zfs_configuration.weekly_maintenance_start_time #=> String # resp.file_system.open_zfs_configuration.disk_iops_configuration.mode #=> String, one of "AUTOMATIC", "USER_PROVISIONED" @@ -3428,7 +3428,7 @@ def create_file_system(params = {}, options = {}) # copy_tags_to_backups: false, # copy_tags_to_volumes: false, # daily_automatic_backup_start_time: "DailyTime", - # deployment_type: "SINGLE_AZ_1", # required, accepts SINGLE_AZ_1, SINGLE_AZ_2, MULTI_AZ_1 + # deployment_type: "SINGLE_AZ_1", # required, accepts SINGLE_AZ_1, SINGLE_AZ_2, SINGLE_AZ_HA_1, SINGLE_AZ_HA_2, MULTI_AZ_1 # throughput_capacity: 1, # required # weekly_maintenance_start_time: "WeeklyTime", # disk_iops_configuration: { @@ -3535,10 +3535,10 @@ def create_file_system(params = {}, options = {}) # resp.file_system.lustre_configuration.metadata_configuration.iops #=> Integer # resp.file_system.lustre_configuration.metadata_configuration.mode #=> String, one of "AUTOMATIC", "USER_PROVISIONED" # resp.file_system.administrative_actions #=> Array - # resp.file_system.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT" + # resp.file_system.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT", 
"DOWNLOAD_DATA_FROM_BACKUP" # resp.file_system.administrative_actions[0].progress_percent #=> Integer # resp.file_system.administrative_actions[0].request_time #=> Time - # resp.file_system.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING" + # resp.file_system.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING", "OPTIMIZING" # resp.file_system.administrative_actions[0].target_file_system_values #=> Types::FileSystem # resp.file_system.administrative_actions[0].failure_details.message #=> String # resp.file_system.administrative_actions[0].target_volume_values.creation_time #=> Time @@ -3624,7 +3624,7 @@ def create_file_system(params = {}, options = {}) # resp.file_system.administrative_actions[0].remaining_transfer_bytes #=> Integer # resp.file_system.ontap_configuration.automatic_backup_retention_days #=> Integer # resp.file_system.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.file_system.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2" + # resp.file_system.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_2" # resp.file_system.ontap_configuration.endpoint_ip_address_range #=> String # resp.file_system.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.file_system.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -3647,7 +3647,7 @@ def create_file_system(params = {}, options = {}) # resp.file_system.open_zfs_configuration.copy_tags_to_backups #=> Boolean # resp.file_system.open_zfs_configuration.copy_tags_to_volumes #=> Boolean # resp.file_system.open_zfs_configuration.daily_automatic_backup_start_time #=> String - # resp.file_system.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_1" + # resp.file_system.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "SINGLE_AZ_HA_1", "SINGLE_AZ_HA_2", "MULTI_AZ_1" # resp.file_system.open_zfs_configuration.throughput_capacity #=> Integer # resp.file_system.open_zfs_configuration.weekly_maintenance_start_time #=> String # resp.file_system.open_zfs_configuration.disk_iops_configuration.mode #=> String, one of "AUTOMATIC", "USER_PROVISIONED" @@ -3749,10 +3749,10 @@ def create_file_system_from_backup(params = {}, options = {}) # resp.snapshot.tags[0].key #=> String # resp.snapshot.tags[0].value #=> String # resp.snapshot.administrative_actions #=> Array - # resp.snapshot.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT" + # resp.snapshot.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT", 
"DOWNLOAD_DATA_FROM_BACKUP" # resp.snapshot.administrative_actions[0].progress_percent #=> Integer # resp.snapshot.administrative_actions[0].request_time #=> Time - # resp.snapshot.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING" + # resp.snapshot.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING", "OPTIMIZING" # resp.snapshot.administrative_actions[0].target_file_system_values.owner_id #=> String # resp.snapshot.administrative_actions[0].target_file_system_values.creation_time #=> Time # resp.snapshot.administrative_actions[0].target_file_system_values.file_system_id #=> String @@ -3823,7 +3823,7 @@ def create_file_system_from_backup(params = {}, options = {}) # resp.snapshot.administrative_actions[0].target_file_system_values.administrative_actions #=> Types::AdministrativeActions # resp.snapshot.administrative_actions[0].target_file_system_values.ontap_configuration.automatic_backup_retention_days #=> Integer # resp.snapshot.administrative_actions[0].target_file_system_values.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.snapshot.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2" + # resp.snapshot.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_2" # resp.snapshot.administrative_actions[0].target_file_system_values.ontap_configuration.endpoint_ip_address_range #=> String # resp.snapshot.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.snapshot.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -3846,7 +3846,7 @@ def create_file_system_from_backup(params = {}, options = {}) # resp.snapshot.administrative_actions[0].target_file_system_values.open_zfs_configuration.copy_tags_to_backups #=> Boolean # resp.snapshot.administrative_actions[0].target_file_system_values.open_zfs_configuration.copy_tags_to_volumes #=> Boolean # resp.snapshot.administrative_actions[0].target_file_system_values.open_zfs_configuration.daily_automatic_backup_start_time #=> String - # resp.snapshot.administrative_actions[0].target_file_system_values.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_1" + # resp.snapshot.administrative_actions[0].target_file_system_values.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "SINGLE_AZ_HA_1", "SINGLE_AZ_HA_2", "MULTI_AZ_1" # resp.snapshot.administrative_actions[0].target_file_system_values.open_zfs_configuration.throughput_capacity #=> Integer # resp.snapshot.administrative_actions[0].target_file_system_values.open_zfs_configuration.weekly_maintenance_start_time #=> String # resp.snapshot.administrative_actions[0].target_file_system_values.open_zfs_configuration.disk_iops_configuration.mode #=> String, one of "AUTOMATIC", "USER_PROVISIONED" @@ -4232,10 +4232,10 @@ def create_storage_virtual_machine(params = {}, options = {}) # resp.volume.volume_type #=> String, one of "ONTAP", "OPENZFS" # resp.volume.lifecycle_transition_reason.message #=> String # resp.volume.administrative_actions #=> Array - # resp.volume.administrative_actions[0].administrative_action_type #=> String, 
one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT" + # resp.volume.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT", "DOWNLOAD_DATA_FROM_BACKUP" # resp.volume.administrative_actions[0].progress_percent #=> Integer # resp.volume.administrative_actions[0].request_time #=> Time - # resp.volume.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING" + # resp.volume.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING", "OPTIMIZING" # resp.volume.administrative_actions[0].target_file_system_values.owner_id #=> String # resp.volume.administrative_actions[0].target_file_system_values.creation_time #=> Time # resp.volume.administrative_actions[0].target_file_system_values.file_system_id #=> String @@ -4306,7 +4306,7 @@ def create_storage_virtual_machine(params = {}, options = {}) # resp.volume.administrative_actions[0].target_file_system_values.administrative_actions #=> Types::AdministrativeActions # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.automatic_backup_retention_days #=> Integer # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2" + # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_2" # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoint_ip_address_range #=> String # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -4329,7 +4329,7 @@ def create_storage_virtual_machine(params = {}, options = {}) # resp.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.copy_tags_to_backups #=> Boolean # resp.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.copy_tags_to_volumes #=> Boolean # resp.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.daily_automatic_backup_start_time #=> String - # resp.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_1" + # resp.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "SINGLE_AZ_HA_1", 
"SINGLE_AZ_HA_2", "MULTI_AZ_1" # resp.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.throughput_capacity #=> Integer # resp.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.weekly_maintenance_start_time #=> String # resp.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.disk_iops_configuration.mode #=> String, one of "AUTOMATIC", "USER_PROVISIONED" @@ -4522,10 +4522,10 @@ def create_volume(params = {}, options = {}) # resp.volume.volume_type #=> String, one of "ONTAP", "OPENZFS" # resp.volume.lifecycle_transition_reason.message #=> String # resp.volume.administrative_actions #=> Array - # resp.volume.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT" + # resp.volume.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT", "DOWNLOAD_DATA_FROM_BACKUP" # resp.volume.administrative_actions[0].progress_percent #=> Integer # resp.volume.administrative_actions[0].request_time #=> Time - # resp.volume.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING" + # resp.volume.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING", "OPTIMIZING" # resp.volume.administrative_actions[0].target_file_system_values.owner_id #=> String # resp.volume.administrative_actions[0].target_file_system_values.creation_time #=> Time # resp.volume.administrative_actions[0].target_file_system_values.file_system_id #=> String @@ -4596,7 +4596,7 @@ def create_volume(params = {}, options = {}) # resp.volume.administrative_actions[0].target_file_system_values.administrative_actions #=> Types::AdministrativeActions # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.automatic_backup_retention_days #=> Integer # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2" + # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_2" # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoint_ip_address_range #=> String # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -4619,7 +4619,7 @@ def create_volume(params = {}, 
options = {}) # resp.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.copy_tags_to_backups #=> Boolean # resp.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.copy_tags_to_volumes #=> Boolean # resp.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.daily_automatic_backup_start_time #=> String - # resp.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_1" + # resp.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "SINGLE_AZ_HA_1", "SINGLE_AZ_HA_2", "MULTI_AZ_1" # resp.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.throughput_capacity #=> Integer # resp.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.weekly_maintenance_start_time #=> String # resp.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.disk_iops_configuration.mode #=> String, one of "AUTOMATIC", "USER_PROVISIONED" @@ -5353,10 +5353,10 @@ def delete_volume(params = {}, options = {}) # resp.backups[0].file_system.lustre_configuration.metadata_configuration.iops #=> Integer # resp.backups[0].file_system.lustre_configuration.metadata_configuration.mode #=> String, one of "AUTOMATIC", "USER_PROVISIONED" # resp.backups[0].file_system.administrative_actions #=> Array - # resp.backups[0].file_system.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT" + # resp.backups[0].file_system.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT", "DOWNLOAD_DATA_FROM_BACKUP" # resp.backups[0].file_system.administrative_actions[0].progress_percent #=> Integer # resp.backups[0].file_system.administrative_actions[0].request_time #=> Time - # resp.backups[0].file_system.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING" + # resp.backups[0].file_system.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING", "OPTIMIZING" # resp.backups[0].file_system.administrative_actions[0].target_file_system_values #=> Types::FileSystem # resp.backups[0].file_system.administrative_actions[0].failure_details.message #=> String # resp.backups[0].file_system.administrative_actions[0].target_volume_values.creation_time #=> Time @@ -5442,7 +5442,7 @@ def delete_volume(params = {}, options = {}) # resp.backups[0].file_system.administrative_actions[0].remaining_transfer_bytes #=> Integer # resp.backups[0].file_system.ontap_configuration.automatic_backup_retention_days #=> Integer # 
resp.backups[0].file_system.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.backups[0].file_system.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2" + # resp.backups[0].file_system.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_2" # resp.backups[0].file_system.ontap_configuration.endpoint_ip_address_range #=> String # resp.backups[0].file_system.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.backups[0].file_system.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -5465,7 +5465,7 @@ def delete_volume(params = {}, options = {}) # resp.backups[0].file_system.open_zfs_configuration.copy_tags_to_backups #=> Boolean # resp.backups[0].file_system.open_zfs_configuration.copy_tags_to_volumes #=> Boolean # resp.backups[0].file_system.open_zfs_configuration.daily_automatic_backup_start_time #=> String - # resp.backups[0].file_system.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_1" + # resp.backups[0].file_system.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "SINGLE_AZ_HA_1", "SINGLE_AZ_HA_2", "MULTI_AZ_1" # resp.backups[0].file_system.open_zfs_configuration.throughput_capacity #=> Integer # resp.backups[0].file_system.open_zfs_configuration.weekly_maintenance_start_time #=> String # resp.backups[0].file_system.open_zfs_configuration.disk_iops_configuration.mode #=> String, one of "AUTOMATIC", "USER_PROVISIONED" @@ -5525,10 +5525,10 @@ def delete_volume(params = {}, options = {}) # resp.backups[0].volume.volume_type #=> String, one of "ONTAP", "OPENZFS" # resp.backups[0].volume.lifecycle_transition_reason.message #=> String # resp.backups[0].volume.administrative_actions #=> Array - # resp.backups[0].volume.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT" + # resp.backups[0].volume.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT", "DOWNLOAD_DATA_FROM_BACKUP" # resp.backups[0].volume.administrative_actions[0].progress_percent #=> Integer # resp.backups[0].volume.administrative_actions[0].request_time #=> Time - # resp.backups[0].volume.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING" + # resp.backups[0].volume.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING", "OPTIMIZING" # resp.backups[0].volume.administrative_actions[0].target_file_system_values.owner_id #=> String # resp.backups[0].volume.administrative_actions[0].target_file_system_values.creation_time #=> Time # 
resp.backups[0].volume.administrative_actions[0].target_file_system_values.file_system_id #=> String @@ -5599,7 +5599,7 @@ def delete_volume(params = {}, options = {}) # resp.backups[0].volume.administrative_actions[0].target_file_system_values.administrative_actions #=> Types::AdministrativeActions # resp.backups[0].volume.administrative_actions[0].target_file_system_values.ontap_configuration.automatic_backup_retention_days #=> Integer # resp.backups[0].volume.administrative_actions[0].target_file_system_values.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.backups[0].volume.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2" + # resp.backups[0].volume.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_2" # resp.backups[0].volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoint_ip_address_range #=> String # resp.backups[0].volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.backups[0].volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -5622,7 +5622,7 @@ def delete_volume(params = {}, options = {}) # resp.backups[0].volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.copy_tags_to_backups #=> Boolean # resp.backups[0].volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.copy_tags_to_volumes #=> Boolean # resp.backups[0].volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.daily_automatic_backup_start_time #=> String - # resp.backups[0].volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_1" + # resp.backups[0].volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "SINGLE_AZ_HA_1", "SINGLE_AZ_HA_2", "MULTI_AZ_1" # resp.backups[0].volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.throughput_capacity #=> Integer # resp.backups[0].volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.weekly_maintenance_start_time #=> String # resp.backups[0].volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.disk_iops_configuration.mode #=> String, one of "AUTOMATIC", "USER_PROVISIONED" @@ -6218,10 +6218,10 @@ def describe_file_system_aliases(params = {}, options = {}) # resp.file_systems[0].lustre_configuration.metadata_configuration.iops #=> Integer # resp.file_systems[0].lustre_configuration.metadata_configuration.mode #=> String, one of "AUTOMATIC", "USER_PROVISIONED" # resp.file_systems[0].administrative_actions #=> Array - # resp.file_systems[0].administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT" + # 
resp.file_systems[0].administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT", "DOWNLOAD_DATA_FROM_BACKUP" # resp.file_systems[0].administrative_actions[0].progress_percent #=> Integer # resp.file_systems[0].administrative_actions[0].request_time #=> Time - # resp.file_systems[0].administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING" + # resp.file_systems[0].administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING", "OPTIMIZING" # resp.file_systems[0].administrative_actions[0].target_file_system_values #=> Types::FileSystem # resp.file_systems[0].administrative_actions[0].failure_details.message #=> String # resp.file_systems[0].administrative_actions[0].target_volume_values.creation_time #=> Time @@ -6307,7 +6307,7 @@ def describe_file_system_aliases(params = {}, options = {}) # resp.file_systems[0].administrative_actions[0].remaining_transfer_bytes #=> Integer # resp.file_systems[0].ontap_configuration.automatic_backup_retention_days #=> Integer # resp.file_systems[0].ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.file_systems[0].ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2" + # resp.file_systems[0].ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_2" # resp.file_systems[0].ontap_configuration.endpoint_ip_address_range #=> String # resp.file_systems[0].ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.file_systems[0].ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -6330,7 +6330,7 @@ def describe_file_system_aliases(params = {}, options = {}) # resp.file_systems[0].open_zfs_configuration.copy_tags_to_backups #=> Boolean # resp.file_systems[0].open_zfs_configuration.copy_tags_to_volumes #=> Boolean # resp.file_systems[0].open_zfs_configuration.daily_automatic_backup_start_time #=> String - # resp.file_systems[0].open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_1" + # resp.file_systems[0].open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "SINGLE_AZ_HA_1", "SINGLE_AZ_HA_2", "MULTI_AZ_1" # resp.file_systems[0].open_zfs_configuration.throughput_capacity #=> Integer # resp.file_systems[0].open_zfs_configuration.weekly_maintenance_start_time #=> String # resp.file_systems[0].open_zfs_configuration.disk_iops_configuration.mode #=> String, one of "AUTOMATIC", "USER_PROVISIONED" @@ -6466,10 +6466,10 @@ def describe_shared_vpc_configuration(params = {}, options = {}) # resp.snapshots[0].tags[0].key #=> String # resp.snapshots[0].tags[0].value #=> String # resp.snapshots[0].administrative_actions #=> Array - # resp.snapshots[0].administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", 
"STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT" + # resp.snapshots[0].administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT", "DOWNLOAD_DATA_FROM_BACKUP" # resp.snapshots[0].administrative_actions[0].progress_percent #=> Integer # resp.snapshots[0].administrative_actions[0].request_time #=> Time - # resp.snapshots[0].administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING" + # resp.snapshots[0].administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING", "OPTIMIZING" # resp.snapshots[0].administrative_actions[0].target_file_system_values.owner_id #=> String # resp.snapshots[0].administrative_actions[0].target_file_system_values.creation_time #=> Time # resp.snapshots[0].administrative_actions[0].target_file_system_values.file_system_id #=> String @@ -6540,7 +6540,7 @@ def describe_shared_vpc_configuration(params = {}, options = {}) # resp.snapshots[0].administrative_actions[0].target_file_system_values.administrative_actions #=> Types::AdministrativeActions # resp.snapshots[0].administrative_actions[0].target_file_system_values.ontap_configuration.automatic_backup_retention_days #=> Integer # resp.snapshots[0].administrative_actions[0].target_file_system_values.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.snapshots[0].administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2" + # resp.snapshots[0].administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_2" # resp.snapshots[0].administrative_actions[0].target_file_system_values.ontap_configuration.endpoint_ip_address_range #=> String # resp.snapshots[0].administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.snapshots[0].administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -6563,7 +6563,7 @@ def describe_shared_vpc_configuration(params = {}, options = {}) # resp.snapshots[0].administrative_actions[0].target_file_system_values.open_zfs_configuration.copy_tags_to_backups #=> Boolean # resp.snapshots[0].administrative_actions[0].target_file_system_values.open_zfs_configuration.copy_tags_to_volumes #=> Boolean # resp.snapshots[0].administrative_actions[0].target_file_system_values.open_zfs_configuration.daily_automatic_backup_start_time #=> String - # resp.snapshots[0].administrative_actions[0].target_file_system_values.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_1" + # resp.snapshots[0].administrative_actions[0].target_file_system_values.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "SINGLE_AZ_HA_1", "SINGLE_AZ_HA_2", "MULTI_AZ_1" # 
resp.snapshots[0].administrative_actions[0].target_file_system_values.open_zfs_configuration.throughput_capacity #=> Integer # resp.snapshots[0].administrative_actions[0].target_file_system_values.open_zfs_configuration.weekly_maintenance_start_time #=> String # resp.snapshots[0].administrative_actions[0].target_file_system_values.open_zfs_configuration.disk_iops_configuration.mode #=> String, one of "AUTOMATIC", "USER_PROVISIONED" @@ -6829,10 +6829,10 @@ def describe_storage_virtual_machines(params = {}, options = {}) # resp.volumes[0].volume_type #=> String, one of "ONTAP", "OPENZFS" # resp.volumes[0].lifecycle_transition_reason.message #=> String # resp.volumes[0].administrative_actions #=> Array - # resp.volumes[0].administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT" + # resp.volumes[0].administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT", "DOWNLOAD_DATA_FROM_BACKUP" # resp.volumes[0].administrative_actions[0].progress_percent #=> Integer # resp.volumes[0].administrative_actions[0].request_time #=> Time - # resp.volumes[0].administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING" + # resp.volumes[0].administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING", "OPTIMIZING" # resp.volumes[0].administrative_actions[0].target_file_system_values.owner_id #=> String # resp.volumes[0].administrative_actions[0].target_file_system_values.creation_time #=> Time # resp.volumes[0].administrative_actions[0].target_file_system_values.file_system_id #=> String @@ -6903,7 +6903,7 @@ def describe_storage_virtual_machines(params = {}, options = {}) # resp.volumes[0].administrative_actions[0].target_file_system_values.administrative_actions #=> Types::AdministrativeActions # resp.volumes[0].administrative_actions[0].target_file_system_values.ontap_configuration.automatic_backup_retention_days #=> Integer # resp.volumes[0].administrative_actions[0].target_file_system_values.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.volumes[0].administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2" + # resp.volumes[0].administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_2" # resp.volumes[0].administrative_actions[0].target_file_system_values.ontap_configuration.endpoint_ip_address_range #=> String # resp.volumes[0].administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.dns_name #=> String # 
resp.volumes[0].administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -6926,7 +6926,7 @@ def describe_storage_virtual_machines(params = {}, options = {}) # resp.volumes[0].administrative_actions[0].target_file_system_values.open_zfs_configuration.copy_tags_to_backups #=> Boolean # resp.volumes[0].administrative_actions[0].target_file_system_values.open_zfs_configuration.copy_tags_to_volumes #=> Boolean # resp.volumes[0].administrative_actions[0].target_file_system_values.open_zfs_configuration.daily_automatic_backup_start_time #=> String - # resp.volumes[0].administrative_actions[0].target_file_system_values.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_1" + # resp.volumes[0].administrative_actions[0].target_file_system_values.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "SINGLE_AZ_HA_1", "SINGLE_AZ_HA_2", "MULTI_AZ_1" # resp.volumes[0].administrative_actions[0].target_file_system_values.open_zfs_configuration.throughput_capacity #=> Integer # resp.volumes[0].administrative_actions[0].target_file_system_values.open_zfs_configuration.weekly_maintenance_start_time #=> String # resp.volumes[0].administrative_actions[0].target_file_system_values.open_zfs_configuration.disk_iops_configuration.mode #=> String, one of "AUTOMATIC", "USER_PROVISIONED" @@ -7232,10 +7232,10 @@ def list_tags_for_resource(params = {}, options = {}) # resp.file_system.lustre_configuration.metadata_configuration.iops #=> Integer # resp.file_system.lustre_configuration.metadata_configuration.mode #=> String, one of "AUTOMATIC", "USER_PROVISIONED" # resp.file_system.administrative_actions #=> Array - # resp.file_system.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT" + # resp.file_system.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT", "DOWNLOAD_DATA_FROM_BACKUP" # resp.file_system.administrative_actions[0].progress_percent #=> Integer # resp.file_system.administrative_actions[0].request_time #=> Time - # resp.file_system.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING" + # resp.file_system.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING", "OPTIMIZING" # resp.file_system.administrative_actions[0].target_file_system_values #=> Types::FileSystem # resp.file_system.administrative_actions[0].failure_details.message #=> String # resp.file_system.administrative_actions[0].target_volume_values.creation_time #=> Time @@ -7321,7 +7321,7 @@ def list_tags_for_resource(params = {}, options = {}) # resp.file_system.administrative_actions[0].remaining_transfer_bytes 
#=> Integer # resp.file_system.ontap_configuration.automatic_backup_retention_days #=> Integer # resp.file_system.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.file_system.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2" + # resp.file_system.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_2" # resp.file_system.ontap_configuration.endpoint_ip_address_range #=> String # resp.file_system.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.file_system.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -7344,7 +7344,7 @@ def list_tags_for_resource(params = {}, options = {}) # resp.file_system.open_zfs_configuration.copy_tags_to_backups #=> Boolean # resp.file_system.open_zfs_configuration.copy_tags_to_volumes #=> Boolean # resp.file_system.open_zfs_configuration.daily_automatic_backup_start_time #=> String - # resp.file_system.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_1" + # resp.file_system.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "SINGLE_AZ_HA_1", "SINGLE_AZ_HA_2", "MULTI_AZ_1" # resp.file_system.open_zfs_configuration.throughput_capacity #=> Integer # resp.file_system.open_zfs_configuration.weekly_maintenance_start_time #=> String # resp.file_system.open_zfs_configuration.disk_iops_configuration.mode #=> String, one of "AUTOMATIC", "USER_PROVISIONED" @@ -7417,10 +7417,10 @@ def release_file_system_nfs_v3_locks(params = {}, options = {}) # resp.volume_id #=> String # resp.lifecycle #=> String, one of "CREATING", "CREATED", "DELETING", "FAILED", "MISCONFIGURED", "PENDING", "AVAILABLE" # resp.administrative_actions #=> Array - # resp.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT" + # resp.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT", "DOWNLOAD_DATA_FROM_BACKUP" # resp.administrative_actions[0].progress_percent #=> Integer # resp.administrative_actions[0].request_time #=> Time - # resp.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING" + # resp.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING", "OPTIMIZING" # resp.administrative_actions[0].target_file_system_values.owner_id #=> String # resp.administrative_actions[0].target_file_system_values.creation_time #=> Time # resp.administrative_actions[0].target_file_system_values.file_system_id #=> String @@ -7491,7 +7491,7 @@ def release_file_system_nfs_v3_locks(params = {}, options = {}) # resp.administrative_actions[0].target_file_system_values.administrative_actions 
#=> Types::AdministrativeActions # resp.administrative_actions[0].target_file_system_values.ontap_configuration.automatic_backup_retention_days #=> Integer # resp.administrative_actions[0].target_file_system_values.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2" + # resp.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_2" # resp.administrative_actions[0].target_file_system_values.ontap_configuration.endpoint_ip_address_range #=> String # resp.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -7514,7 +7514,7 @@ def release_file_system_nfs_v3_locks(params = {}, options = {}) # resp.administrative_actions[0].target_file_system_values.open_zfs_configuration.copy_tags_to_backups #=> Boolean # resp.administrative_actions[0].target_file_system_values.open_zfs_configuration.copy_tags_to_volumes #=> Boolean # resp.administrative_actions[0].target_file_system_values.open_zfs_configuration.daily_automatic_backup_start_time #=> String - # resp.administrative_actions[0].target_file_system_values.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_1" + # resp.administrative_actions[0].target_file_system_values.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "SINGLE_AZ_HA_1", "SINGLE_AZ_HA_2", "MULTI_AZ_1" # resp.administrative_actions[0].target_file_system_values.open_zfs_configuration.throughput_capacity #=> Integer # resp.administrative_actions[0].target_file_system_values.open_zfs_configuration.weekly_maintenance_start_time #=> String # resp.administrative_actions[0].target_file_system_values.open_zfs_configuration.disk_iops_configuration.mode #=> String, one of "AUTOMATIC", "USER_PROVISIONED" @@ -7715,10 +7715,10 @@ def restore_volume_from_snapshot(params = {}, options = {}) # resp.file_system.lustre_configuration.metadata_configuration.iops #=> Integer # resp.file_system.lustre_configuration.metadata_configuration.mode #=> String, one of "AUTOMATIC", "USER_PROVISIONED" # resp.file_system.administrative_actions #=> Array - # resp.file_system.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT" + # resp.file_system.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT", "DOWNLOAD_DATA_FROM_BACKUP" # resp.file_system.administrative_actions[0].progress_percent #=> Integer # 
resp.file_system.administrative_actions[0].request_time #=> Time - # resp.file_system.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING" + # resp.file_system.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING", "OPTIMIZING" # resp.file_system.administrative_actions[0].target_file_system_values #=> Types::FileSystem # resp.file_system.administrative_actions[0].failure_details.message #=> String # resp.file_system.administrative_actions[0].target_volume_values.creation_time #=> Time @@ -7804,7 +7804,7 @@ def restore_volume_from_snapshot(params = {}, options = {}) # resp.file_system.administrative_actions[0].remaining_transfer_bytes #=> Integer # resp.file_system.ontap_configuration.automatic_backup_retention_days #=> Integer # resp.file_system.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.file_system.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2" + # resp.file_system.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_2" # resp.file_system.ontap_configuration.endpoint_ip_address_range #=> String # resp.file_system.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.file_system.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -7827,7 +7827,7 @@ def restore_volume_from_snapshot(params = {}, options = {}) # resp.file_system.open_zfs_configuration.copy_tags_to_backups #=> Boolean # resp.file_system.open_zfs_configuration.copy_tags_to_volumes #=> Boolean # resp.file_system.open_zfs_configuration.daily_automatic_backup_start_time #=> String - # resp.file_system.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_1" + # resp.file_system.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "SINGLE_AZ_HA_1", "SINGLE_AZ_HA_2", "MULTI_AZ_1" # resp.file_system.open_zfs_configuration.throughput_capacity #=> Integer # resp.file_system.open_zfs_configuration.weekly_maintenance_start_time #=> String # resp.file_system.open_zfs_configuration.disk_iops_configuration.mode #=> String, one of "AUTOMATIC", "USER_PROVISIONED" @@ -8386,6 +8386,7 @@ def update_file_cache(params = {}, options = {}) # add_route_table_ids: ["RouteTableId"], # remove_route_table_ids: ["RouteTableId"], # throughput_capacity_per_ha_pair: 1, + # ha_pairs: 1, # }, # open_zfs_configuration: { # automatic_backup_retention_days: 1, @@ -8474,10 +8475,10 @@ def update_file_cache(params = {}, options = {}) # resp.file_system.lustre_configuration.metadata_configuration.iops #=> Integer # resp.file_system.lustre_configuration.metadata_configuration.mode #=> String, one of "AUTOMATIC", "USER_PROVISIONED" # resp.file_system.administrative_actions #=> Array - # resp.file_system.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT" + # resp.file_system.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", 
"FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT", "DOWNLOAD_DATA_FROM_BACKUP" # resp.file_system.administrative_actions[0].progress_percent #=> Integer # resp.file_system.administrative_actions[0].request_time #=> Time - # resp.file_system.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING" + # resp.file_system.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING", "OPTIMIZING" # resp.file_system.administrative_actions[0].target_file_system_values #=> Types::FileSystem # resp.file_system.administrative_actions[0].failure_details.message #=> String # resp.file_system.administrative_actions[0].target_volume_values.creation_time #=> Time @@ -8563,7 +8564,7 @@ def update_file_cache(params = {}, options = {}) # resp.file_system.administrative_actions[0].remaining_transfer_bytes #=> Integer # resp.file_system.ontap_configuration.automatic_backup_retention_days #=> Integer # resp.file_system.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.file_system.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2" + # resp.file_system.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_2" # resp.file_system.ontap_configuration.endpoint_ip_address_range #=> String # resp.file_system.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.file_system.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -8586,7 +8587,7 @@ def update_file_cache(params = {}, options = {}) # resp.file_system.open_zfs_configuration.copy_tags_to_backups #=> Boolean # resp.file_system.open_zfs_configuration.copy_tags_to_volumes #=> Boolean # resp.file_system.open_zfs_configuration.daily_automatic_backup_start_time #=> String - # resp.file_system.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_1" + # resp.file_system.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "SINGLE_AZ_HA_1", "SINGLE_AZ_HA_2", "MULTI_AZ_1" # resp.file_system.open_zfs_configuration.throughput_capacity #=> Integer # resp.file_system.open_zfs_configuration.weekly_maintenance_start_time #=> String # resp.file_system.open_zfs_configuration.disk_iops_configuration.mode #=> String, one of "AUTOMATIC", "USER_PROVISIONED" @@ -8707,10 +8708,10 @@ def update_shared_vpc_configuration(params = {}, options = {}) # resp.snapshot.tags[0].key #=> String # resp.snapshot.tags[0].value #=> String # resp.snapshot.administrative_actions #=> Array - # resp.snapshot.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT" + # resp.snapshot.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", 
"FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT", "DOWNLOAD_DATA_FROM_BACKUP" # resp.snapshot.administrative_actions[0].progress_percent #=> Integer # resp.snapshot.administrative_actions[0].request_time #=> Time - # resp.snapshot.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING" + # resp.snapshot.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING", "OPTIMIZING" # resp.snapshot.administrative_actions[0].target_file_system_values.owner_id #=> String # resp.snapshot.administrative_actions[0].target_file_system_values.creation_time #=> Time # resp.snapshot.administrative_actions[0].target_file_system_values.file_system_id #=> String @@ -8781,7 +8782,7 @@ def update_shared_vpc_configuration(params = {}, options = {}) # resp.snapshot.administrative_actions[0].target_file_system_values.administrative_actions #=> Types::AdministrativeActions # resp.snapshot.administrative_actions[0].target_file_system_values.ontap_configuration.automatic_backup_retention_days #=> Integer # resp.snapshot.administrative_actions[0].target_file_system_values.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.snapshot.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2" + # resp.snapshot.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_2" # resp.snapshot.administrative_actions[0].target_file_system_values.ontap_configuration.endpoint_ip_address_range #=> String # resp.snapshot.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.snapshot.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -8804,7 +8805,7 @@ def update_shared_vpc_configuration(params = {}, options = {}) # resp.snapshot.administrative_actions[0].target_file_system_values.open_zfs_configuration.copy_tags_to_backups #=> Boolean # resp.snapshot.administrative_actions[0].target_file_system_values.open_zfs_configuration.copy_tags_to_volumes #=> Boolean # resp.snapshot.administrative_actions[0].target_file_system_values.open_zfs_configuration.daily_automatic_backup_start_time #=> String - # resp.snapshot.administrative_actions[0].target_file_system_values.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_1" + # resp.snapshot.administrative_actions[0].target_file_system_values.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "SINGLE_AZ_HA_1", "SINGLE_AZ_HA_2", "MULTI_AZ_1" # resp.snapshot.administrative_actions[0].target_file_system_values.open_zfs_configuration.throughput_capacity #=> Integer # resp.snapshot.administrative_actions[0].target_file_system_values.open_zfs_configuration.weekly_maintenance_start_time #=> String # resp.snapshot.administrative_actions[0].target_file_system_values.open_zfs_configuration.disk_iops_configuration.mode #=> String, one of "AUTOMATIC", "USER_PROVISIONED" @@ -9130,10 
+9131,10 @@ def update_storage_virtual_machine(params = {}, options = {}) # resp.volume.volume_type #=> String, one of "ONTAP", "OPENZFS" # resp.volume.lifecycle_transition_reason.message #=> String # resp.volume.administrative_actions #=> Array - # resp.volume.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT" + # resp.volume.administrative_actions[0].administrative_action_type #=> String, one of "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", "FILE_SYSTEM_ALIAS_ASSOCIATION", "FILE_SYSTEM_ALIAS_DISASSOCIATION", "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", "VOLUME_RESTORE", "THROUGHPUT_OPTIMIZATION", "IOPS_OPTIMIZATION", "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", "VOLUME_INITIALIZE_WITH_SNAPSHOT", "DOWNLOAD_DATA_FROM_BACKUP" # resp.volume.administrative_actions[0].progress_percent #=> Integer # resp.volume.administrative_actions[0].request_time #=> Time - # resp.volume.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING" + # resp.volume.administrative_actions[0].status #=> String, one of "FAILED", "IN_PROGRESS", "PENDING", "COMPLETED", "UPDATED_OPTIMIZING", "OPTIMIZING" # resp.volume.administrative_actions[0].target_file_system_values.owner_id #=> String # resp.volume.administrative_actions[0].target_file_system_values.creation_time #=> Time # resp.volume.administrative_actions[0].target_file_system_values.file_system_id #=> String @@ -9204,7 +9205,7 @@ def update_storage_virtual_machine(params = {}, options = {}) # resp.volume.administrative_actions[0].target_file_system_values.administrative_actions #=> Types::AdministrativeActions # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.automatic_backup_retention_days #=> Integer # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.daily_automatic_backup_start_time #=> String - # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2" + # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.deployment_type #=> String, one of "MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_2" # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoint_ip_address_range #=> String # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.dns_name #=> String # resp.volume.administrative_actions[0].target_file_system_values.ontap_configuration.endpoints.intercluster.ip_addresses #=> Array @@ -9227,7 +9228,7 @@ def update_storage_virtual_machine(params = {}, options = {}) # resp.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.copy_tags_to_backups #=> Boolean # resp.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.copy_tags_to_volumes #=> Boolean # resp.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.daily_automatic_backup_start_time #=> String - # 
resp.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "MULTI_AZ_1"
+ # resp.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.deployment_type #=> String, one of "SINGLE_AZ_1", "SINGLE_AZ_2", "SINGLE_AZ_HA_1", "SINGLE_AZ_HA_2", "MULTI_AZ_1"
 # resp.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.throughput_capacity #=> Integer
 # resp.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.weekly_maintenance_start_time #=> String
 # resp.volume.administrative_actions[0].target_file_system_values.open_zfs_configuration.disk_iops_configuration.mode #=> String, one of "AUTOMATIC", "USER_PROVISIONED"
@@ -9302,7 +9303,7 @@ def build_request(operation_name, params = {})
 params: params, config: config)
 context[:gem_name] = 'aws-sdk-fsx'
- context[:gem_version] = '1.93.0'
+ context[:gem_version] = '1.94.0'
 Seahorse::Client::Request.new(handlers, context) end
diff --git a/gems/aws-sdk-fsx/lib/aws-sdk-fsx/client_api.rb b/gems/aws-sdk-fsx/lib/aws-sdk-fsx/client_api.rb
index 0f082b5a2be..321111d401c 100644
--- a/gems/aws-sdk-fsx/lib/aws-sdk-fsx/client_api.rb
+++ b/gems/aws-sdk-fsx/lib/aws-sdk-fsx/client_api.rb
@@ -1716,6 +1716,7 @@ module ClientApi
 UpdateFileSystemOntapConfiguration.add_member(:add_route_table_ids, Shapes::ShapeRef.new(shape: RouteTableIds, location_name: "AddRouteTableIds"))
 UpdateFileSystemOntapConfiguration.add_member(:remove_route_table_ids, Shapes::ShapeRef.new(shape: RouteTableIds, location_name: "RemoveRouteTableIds"))
 UpdateFileSystemOntapConfiguration.add_member(:throughput_capacity_per_ha_pair, Shapes::ShapeRef.new(shape: ThroughputCapacityPerHAPair, location_name: "ThroughputCapacityPerHAPair"))
+ UpdateFileSystemOntapConfiguration.add_member(:ha_pairs, Shapes::ShapeRef.new(shape: HAPairs, location_name: "HAPairs"))
 UpdateFileSystemOntapConfiguration.struct_class = Types::UpdateFileSystemOntapConfiguration
 UpdateFileSystemOpenZFSConfiguration.add_member(:automatic_backup_retention_days, Shapes::ShapeRef.new(shape: AutomaticBackupRetentionDays, location_name: "AutomaticBackupRetentionDays"))
diff --git a/gems/aws-sdk-fsx/lib/aws-sdk-fsx/types.rb b/gems/aws-sdk-fsx/lib/aws-sdk-fsx/types.rb
index 7ea00d154c5..b59d09d2918 100644
--- a/gems/aws-sdk-fsx/lib/aws-sdk-fsx/types.rb
+++ b/gems/aws-sdk-fsx/lib/aws-sdk-fsx/types.rb
@@ -91,7 +91,7 @@ class ActiveDirectoryError < Struct.new(
 # File Server User Guide*.
 #
 # * `STORAGE_OPTIMIZATION` - After the `FILE_SYSTEM_UPDATE` task to
- #   increase a file system's storage capacity has been completed
+ #   increase a file system's storage capacity has completed
 #   successfully, a `STORAGE_OPTIMIZATION` task starts.
 #
 #   * For Windows and ONTAP, storage optimization is the process of
@@ -154,6 +154,11 @@ class ActiveDirectoryError < Struct.new(
 # * `RELEASE_NFS_V3_LOCKS` - Tracks the release of Network File System
 #   (NFS) V3 locks on an Amazon FSx for OpenZFS file system.
 #
+ # * `DOWNLOAD_DATA_FROM_BACKUP` - An FSx for ONTAP backup is being
+ #   restored to a new volume on a second-generation file system. Once
+ #   all of the file metadata is loaded onto the volume, you can mount
+ #   the volume with read-only access during this process.
+ #
 # * `VOLUME_INITIALIZE_WITH_SNAPSHOT` - A volume is being created from
 #   a snapshot on a different FSx for OpenZFS file system.
You can
 #   initiate this from the Amazon FSx console, API (`CreateVolume`),
@@ -178,9 +183,9 @@ class ActiveDirectoryError < Struct.new(
 # @return [String]
 #
 # @!attribute [rw] progress_percent
- #   The percentage-complete status of a `STORAGE_OPTIMIZATION`
- #   administrative action. Does not apply to any other administrative
- #   action type.
+ #   The percentage-complete status of a `STORAGE_OPTIMIZATION` or
+ #   `DOWNLOAD_DATA_FROM_BACKUP` administrative action. Does not apply to
+ #   any other administrative action type.
 # @return [Integer]
 #
 # @!attribute [rw] request_time
@@ -202,9 +207,25 @@ class ActiveDirectoryError < Struct.new(
 # * `COMPLETED` - Amazon FSx has finished processing the
 #   administrative task.
 #
+ #   For a backup restore to a second-generation FSx for ONTAP file
+ #   system, indicates that all data has been downloaded to the volume,
+ #   and clients now have read-write access to the volume.
+ #
 # * `UPDATED_OPTIMIZING` - For a storage-capacity increase update,
 #   Amazon FSx has updated the file system with the new storage
 #   capacity, and is now performing the storage-optimization process.
+ #
+ # * `PENDING` - For a backup restore to a second-generation FSx for
+ #   ONTAP file system, indicates that the file metadata is being
+ #   downloaded onto the volume. The volume's Lifecycle state is
+ #   CREATING.
+ #
+ # * `IN_PROGRESS` - For a backup restore to a second-generation FSx
+ #   for ONTAP file system, indicates that all metadata has been
+ #   downloaded to the new volume and clients can access data with
+ #   read-only access while Amazon FSx downloads the file data to the
+ #   volume. Track the progress of this process with the
+ #   `ProgressPercent` element.
 # @return [String]
 #
 # @!attribute [rw] target_file_system_values
@@ -281,7 +302,7 @@ class AdministrativeActionFailureDetails < Struct.new(
 # the following conditions:
 #
 # * The strings in the value of `Aggregates` are not formatted
- #   as `aggrX`, where X is a number between 1 and 6.
+ #   as `aggrX`, where X is a number between 1 and 12.
 #
 # * The value of `Aggregates` contains aggregates that are not
 #   present.
@@ -2037,14 +2058,21 @@ class CreateFileSystemLustreMetadataConfiguration < Struct.new(
 # Specifies the FSx for ONTAP file system deployment type to use in
 # creating the file system.
 #
- # * `MULTI_AZ_1` - (Default) A high availability file system
- #   configured for Multi-AZ redundancy to tolerate temporary
- #   Availability Zone (AZ) unavailability.
+ # * `MULTI_AZ_1` - A high availability file system configured for
+ #   Multi-AZ redundancy to tolerate temporary Availability Zone (AZ)
+ #   unavailability. This is a first-generation FSx for ONTAP file
+ #   system.
+ #
+ # * `MULTI_AZ_2` - A high availability file system configured for
+ #   Multi-AZ redundancy to tolerate temporary AZ unavailability. This
+ #   is a second-generation FSx for ONTAP file system.
 #
 # * `SINGLE_AZ_1` - A file system configured for Single-AZ redundancy.
+ #   This is a first-generation FSx for ONTAP file system.
 #
 # * `SINGLE_AZ_2` - A file system configured with multiple
- #   high-availability (HA) pairs for Single-AZ redundancy.
+ #   high-availability (HA) pairs for Single-AZ redundancy. This is a
+ #   second-generation FSx for ONTAP file system.
 #
 # For information about the use cases for Multi-AZ and Single-AZ
 # deployments, refer to [Choosing a file system deployment type][1].
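Taken together, the new `MULTI_AZ_2` deployment type, the `HAPairs` member, and the per-HA-pair throughput values documented above describe how a second-generation FSx for ONTAP file system is created through this SDK. A minimal sketch of such a call follows; the subnet IDs, region, and capacity figures are illustrative placeholders, not values prescribed by this patch:

    require "aws-sdk-fsx"

    fsx = Aws::FSx::Client.new(region: "us-east-1")

    # Create a second-generation (MULTI_AZ_2) FSx for ONTAP file system.
    # Multi-AZ file systems use exactly one HA pair, and MULTI_AZ_2 accepts
    # 384, 768, 1536, 3072, or 6144 MBps per HA pair (per the docs above).
    resp = fsx.create_file_system(
      file_system_type: "ONTAP",
      storage_capacity: 1024, # GiB; placeholder size
      subnet_ids: ["subnet-aaaa1111", "subnet-bbbb2222"], # placeholder IDs
      ontap_configuration: {
        deployment_type: "MULTI_AZ_2",
        preferred_subnet_id: "subnet-aaaa1111", # required for Multi-AZ types
        ha_pairs: 1,
        throughput_capacity_per_ha_pair: 384,
      },
    )
    puts resp.file_system.file_system_id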
@@ -2077,9 +2105,9 @@ class CreateFileSystemLustreMetadataConfiguration < Struct.new( # @return [Types::DiskIopsConfiguration] # # @!attribute [rw] preferred_subnet_id - # Required when `DeploymentType` is set to `MULTI_AZ_1`. This - # specifies the subnet in which you want the preferred file server to - # be located. + # Required when `DeploymentType` is set to `MULTI_AZ_1` or + # `MULTI_AZ_2`. This specifies the subnet in which you want the + # preferred file server to be located. # @return [String] # # @!attribute [rw] route_table_ids @@ -2137,12 +2165,16 @@ class CreateFileSystemLustreMetadataConfiguration < Struct.new( # # @!attribute [rw] ha_pairs # Specifies how many high-availability (HA) pairs of file servers will - # power your file system. Scale-up file systems are powered by 1 HA - # pair. The default value is 1. FSx for ONTAP scale-out file systems - # are powered by up to 12 HA pairs. The value of this property affects - # the values of `StorageCapacity`, `Iops`, and `ThroughputCapacity`. - # For more information, see [High-availability (HA) pairs][1] in the - # FSx for ONTAP user guide. + # power your file system. First-generation file systems are powered by + # 1 HA pair. Second-generation multi-AZ file systems are powered by 1 + # HA pair. Second generation single-AZ file systems are powered by up + # to 12 HA pairs. The default value is 1. The value of this property + # affects the values of `StorageCapacity`, `Iops`, and + # `ThroughputCapacity`. For more information, see [High-availability + # (HA) pairs][1] in the FSx for ONTAP user guide. Block storage + # protocol support (iSCSI and NVMe over TCP) is disabled on file + # systems with more than 6 HA pairs. For more information, see [Using + # block storage protocols][2]. # # Amazon FSx responds with an HTTP status code 400 (Bad Request) for # the following conditions: @@ -2150,11 +2182,12 @@ class CreateFileSystemLustreMetadataConfiguration < Struct.new( # * The value of `HAPairs` is less than 1 or greater than 12. # # * The value of `HAPairs` is greater than 1 and the value of - # `DeploymentType` is `SINGLE_AZ_1` or `MULTI_AZ_1`. + # `DeploymentType` is `SINGLE_AZ_1`, `MULTI_AZ_1`, or `MULTI_AZ_2`. # # # # [1]: https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/administering-file-systems.html#HA-pairs + # [2]: https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/supported-fsx-clients.html#using-block-storage # @return [Integer] # # @!attribute [rw] throughput_capacity_per_ha_pair @@ -2164,13 +2197,15 @@ class CreateFileSystemLustreMetadataConfiguration < Struct.new( # You can define either the `ThroughputCapacityPerHAPair` or the # `ThroughputCapacity` when creating a file system, but not both. # - # This field and `ThroughputCapacity` are the same for scale-up file - # systems powered by one HA pair. + # This field and `ThroughputCapacity` are the same for file systems + # powered by one HA pair. # # * For `SINGLE_AZ_1` and `MULTI_AZ_1` file systems, valid values are # 128, 256, 512, 1024, 2048, or 4096 MBps. # - # * For `SINGLE_AZ_2` file systems, valid values are 3072 or 6144 + # * For `SINGLE_AZ_2`, valid values are 1536, 3072, or 6144 MBps. + # + # * For `MULTI_AZ_2`, valid values are 384, 768, 1536, 3072, or 6144 # MBps. # # Amazon FSx responds with an HTTP status code 400 (Bad Request) for @@ -2181,8 +2216,8 @@ class CreateFileSystemLustreMetadataConfiguration < Struct.new( # systems with one HA pair. 
#
 # * The value of deployment type is `SINGLE_AZ_2` and
- #   `ThroughputCapacity` / `ThroughputCapacityPerHAPair` is a valid HA
- #   pair (a value between 2 and 12).
+ #   `ThroughputCapacity` / `ThroughputCapacityPerHAPair` is not a
+ #   valid HA pair (a value between 1 and 12).
 #
 # * The value of `ThroughputCapacityPerHAPair` is not a valid value.
 # @return [Integer]
 #
@@ -2243,31 +2278,36 @@ class CreateFileSystemOntapConfiguration < Struct.new(
 # @return [String]
 #
 # @!attribute [rw] deployment_type
- #   Specifies the file system deployment type. Single AZ deployment
- #   types are configured for redundancy within a single Availability
- #   Zone in an Amazon Web Services Region. Valid values are the
+ #   Specifies the file system deployment type. Valid values are the
 #   following:
 #
- # * `MULTI_AZ_1`- Creates file systems with high availability that are
- #   configured for Multi-AZ redundancy to tolerate temporary
- #   unavailability in Availability Zones (AZs). `Multi_AZ_1` is
- #   available only in the US East (N. Virginia), US East (Ohio), US
- #   West (Oregon), Asia Pacific (Singapore), Asia Pacific (Tokyo), and
- #   Europe (Ireland) Amazon Web Services Regions.
+ # * `MULTI_AZ_1`- Creates file systems with high availability and
+ #   durability by replicating your data and supporting failover across
+ #   multiple Availability Zones in the same Amazon Web Services
+ #   Region.
 #
- # * `SINGLE_AZ_1`- Creates file systems with throughput capacities of
- #   64 - 4,096 MB/s. `Single_AZ_1` is available in all Amazon Web
- #   Services Regions where Amazon FSx for OpenZFS is available.
+ # * `SINGLE_AZ_HA_2`- Creates file systems with high availability and
+ #   throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC
+ #   cache by deploying a primary and standby file system within the
+ #   same Availability Zone.
+ #
+ # * `SINGLE_AZ_HA_1`- Creates file systems with high availability and
+ #   throughput capacities of 64 - 4,096 MB/s by deploying a primary
+ #   and standby file system within the same Availability Zone.
 #
 # * `SINGLE_AZ_2`- Creates file systems with throughput capacities of
- #   160 - 10,240 MB/s using an NVMe L2ARC cache. `Single_AZ_2` is
- #   available only in the US East (N. Virginia), US East (Ohio), US
- #   West (Oregon), Asia Pacific (Singapore), Asia Pacific (Tokyo), and
- #   Europe (Ireland) Amazon Web Services Regions.
+ #   160 - 10,240 MB/s using an NVMe L2ARC cache that automatically
+ #   recover within a single Availability Zone.
 #
- # For more information, see [Deployment type availability][1] and
- # [File system performance][2] in the *Amazon FSx for OpenZFS User
- # Guide*.
+ # * `SINGLE_AZ_1`- Creates file systems with throughput capacities of
+ #   64 - 4,096 MB/s that automatically recover within a single
+ #   Availability Zone.
+ #
+ # For a list of which Amazon Web Services Regions each deployment type
+ # is available in, see [Deployment type availability][1]. For more
+ # information on the differences in performance between deployment
+ # types, see [File system performance][2] in the *Amazon FSx for
+ # OpenZFS User Guide*.
 #
 #
 #
@@ -5541,7 +5581,8 @@ class FileCache < Struct.new(
 # @return [String]
 #
 # @!attribute [rw] failure_details
- #   A structure providing details of any failures that occurred.
+ #   A structure providing details of any failures that occurred in
+ #   creating a cache.
# @return [Types::FileCacheFailureDetails] # # @!attribute [rw] storage_capacity @@ -6829,14 +6870,21 @@ class NotServiceResourceError < Struct.new( # Specifies the FSx for ONTAP file system deployment type in use in # the file system. # - # * `MULTI_AZ_1` - (Default) A high availability file system - # configured for Multi-AZ redundancy to tolerate temporary - # Availability Zone (AZ) unavailability. + # * `MULTI_AZ_1` - A high availability file system configured for + # Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) + # unavailability. This is a first-generation FSx for ONTAP file + # system. + # + # * `MULTI_AZ_2` - A high availability file system configured for + # Multi-AZ redundancy to tolerate temporary AZ unavailability. This + # is a second-generation FSx for ONTAP file system. # # * `SINGLE_AZ_1` - A file system configured for Single-AZ redundancy. + # This is a first-generation FSx for ONTAP file system. # # * `SINGLE_AZ_2` - A file system configured with multiple - # high-availability (HA) pairs for Single-AZ redundancy. + # high-availability (HA) pairs for Single-AZ redundancy. This is a + # second-generation FSx for ONTAP file system. # # For information about the use cases for Multi-AZ and Single-AZ # deployments, refer to [Choosing Multi-AZ or Single-AZ file system @@ -6925,7 +6973,7 @@ class NotServiceResourceError < Struct.new( # * The value of `HAPairs` is less than 1 or greater than 12. # # * The value of `HAPairs` is greater than 1 and the value of - # `DeploymentType` is `SINGLE_AZ_1` or `MULTI_AZ_1`. + # `DeploymentType` is `SINGLE_AZ_1`, `MULTI_AZ_1`, or `MULTI_AZ_2`. # # # @@ -6943,10 +6991,13 @@ class NotServiceResourceError < Struct.new( # This field and `ThroughputCapacity` are the same for file systems # with one HA pair. # - # * For `SINGLE_AZ_1` and `MULTI_AZ_1`, valid values are 128, 256, - # 512, 1024, 2048, or 4096 MBps. + # * For `SINGLE_AZ_1` and `MULTI_AZ_1` file systems, valid values are + # 128, 256, 512, 1024, 2048, or 4096 MBps. # - # * For `SINGLE_AZ_2`, valid values are 3072 or 6144 MBps. + # * For `SINGLE_AZ_2`, valid values are 1536, 3072, or 6144 MBps. + # + # * For `MULTI_AZ_2`, valid values are 384, 768, 1536, 3072, or 6144 + # MBps. # # Amazon FSx responds with an HTTP status code 400 (Bad Request) for # the following conditions: @@ -6955,8 +7006,8 @@ class NotServiceResourceError < Struct.new( # `ThroughputCapacityPerHAPair` are not the same value. # # * The value of deployment type is `SINGLE_AZ_2` and - # `ThroughputCapacity` / `ThroughputCapacityPerHAPair` is a valid HA - # pair (a value between 2 and 12). + # `ThroughputCapacity` / `ThroughputCapacityPerHAPair` is not a + # valid HA pair (a value between 1 and 12). # # * The value of `ThroughputCapacityPerHAPair` is not a valid value. # @return [Integer] @@ -7293,7 +7344,8 @@ class OpenZFSCreateRootVolumeConfiguration < Struct.new( # # @!attribute [rw] deployment_type # Specifies the file-system deployment type. Amazon FSx for OpenZFS - # supports
 `MULTI_AZ_1`, `SINGLE_AZ_1`, and `SINGLE_AZ_2`. + # supports
 `MULTI_AZ_1`, `SINGLE_AZ_HA_2`, `SINGLE_AZ_HA_1`, + # `SINGLE_AZ_2`, and `SINGLE_AZ_1`. # @return [String] # # @!attribute [rw] throughput_capacity @@ -9161,10 +9213,13 @@ class UpdateFileSystemLustreMetadataConfiguration < Struct.new( # This field and `ThroughputCapacity` are the same for file systems # with one HA pair. # - # * For `SINGLE_AZ_1` and `MULTI_AZ_1`, valid values are 128, 256, - # 512, 1024, 2048, or 4096 MBps. + # * For `SINGLE_AZ_1` and `MULTI_AZ_1` file systems, valid values are + # 128, 256, 512, 1024, 2048, or 4096 MBps. # - # * For `SINGLE_AZ_2`, valid values are 3072 or 6144 MBps. + # * For `SINGLE_AZ_2`, valid values are 1536, 3072, or 6144 MBps. + # + # * For `MULTI_AZ_2`, valid values are 384, 768, 1536, 3072, or 6144 + # MBps. # # Amazon FSx responds with an HTTP status code 400 (Bad Request) for # the following conditions: @@ -9174,12 +9229,28 @@ class UpdateFileSystemLustreMetadataConfiguration < Struct.new( # systems with one HA pair. # # * The value of deployment type is `SINGLE_AZ_2` and - # `ThroughputCapacity` / `ThroughputCapacityPerHAPair` is a valid HA - # pair (a value between 2 and 12). + # `ThroughputCapacity` / `ThroughputCapacityPerHAPair` is not a + # valid HA pair (a value between 1 and 12). # # * The value of `ThroughputCapacityPerHAPair` is not a valid value. # @return [Integer] # + # @!attribute [rw] ha_pairs + # Use to update the number of high-availability (HA) pairs for a + # second-generation single-AZ file system. If you increase the number + # of HA pairs for your file system, you must specify proportional + # increases for `StorageCapacity`, `Iops`, and `ThroughputCapacity`. + # For more information, see [High-availability (HA) pairs][1] in the + # FSx for ONTAP user guide. Block storage protocol support (iSCSI and + # NVMe over TCP) is disabled on file systems with more than 6 HA + # pairs. For more information, see [Using block storage protocols][2]. 
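The new `ha_pairs` member of `UpdateFileSystemOntapConfiguration` lets callers scale out an existing second-generation single-AZ file system. A hedged sketch under the rules documented above (the file system ID and the proportional capacity values are placeholders; the exact proportionality requirements for `StorageCapacity`, `Iops`, and `ThroughputCapacity` come from the linked HA-pairs documentation):

    require "aws-sdk-fsx"

    fsx = Aws::FSx::Client.new(region: "us-east-1")

    # Scale a second-generation single-AZ file system from 1 to 2 HA pairs.
    # StorageCapacity and throughput must grow proportionally; the numbers
    # below are illustrative only.
    resp = fsx.update_file_system(
      file_system_id: "fs-0123456789abcdef0", # placeholder ID
      storage_capacity: 2048,
      ontap_configuration: {
        ha_pairs: 2,
        throughput_capacity_per_ha_pair: 1536,
      },
    )

    # The update surfaces as administrative actions on the file system.
    resp.file_system.administrative_actions.each do |action|
      puts "#{action.administrative_action_type}: #{action.status}"
    end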
+ # + # + # + # [1]: https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/administering-file-systems.html#HA-pairs + # [2]: https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/supported-fsx-clients.html#using-block-storage + # @return [Integer] + # # @see http://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/UpdateFileSystemOntapConfiguration AWS API Documentation # class UpdateFileSystemOntapConfiguration < Struct.new( @@ -9191,7 +9262,8 @@ class UpdateFileSystemOntapConfiguration < Struct.new( :throughput_capacity, :add_route_table_ids, :remove_route_table_ids, - :throughput_capacity_per_ha_pair) + :throughput_capacity_per_ha_pair, + :ha_pairs) SENSITIVE = [:fsx_admin_password] include Aws::Structure end diff --git a/gems/aws-sdk-fsx/sig/client.rbs b/gems/aws-sdk-fsx/sig/client.rbs index 1eb7b034406..f3631d0ae15 100644 --- a/gems/aws-sdk-fsx/sig/client.rbs +++ b/gems/aws-sdk-fsx/sig/client.rbs @@ -330,7 +330,7 @@ module Aws ?ontap_configuration: { automatic_backup_retention_days: ::Integer?, daily_automatic_backup_start_time: ::String?, - deployment_type: ("MULTI_AZ_1" | "SINGLE_AZ_1" | "SINGLE_AZ_2"), + deployment_type: ("MULTI_AZ_1" | "SINGLE_AZ_1" | "SINGLE_AZ_2" | "MULTI_AZ_2"), endpoint_ip_address_range: ::String?, fsx_admin_password: ::String?, disk_iops_configuration: { @@ -350,7 +350,7 @@ module Aws copy_tags_to_backups: bool?, copy_tags_to_volumes: bool?, daily_automatic_backup_start_time: ::String?, - deployment_type: ("SINGLE_AZ_1" | "SINGLE_AZ_2" | "MULTI_AZ_1"), + deployment_type: ("SINGLE_AZ_1" | "SINGLE_AZ_2" | "SINGLE_AZ_HA_1" | "SINGLE_AZ_HA_2" | "MULTI_AZ_1"), throughput_capacity: ::Integer, weekly_maintenance_start_time: ::String?, disk_iops_configuration: { @@ -465,7 +465,7 @@ module Aws copy_tags_to_backups: bool?, copy_tags_to_volumes: bool?, daily_automatic_backup_start_time: ::String?, - deployment_type: ("SINGLE_AZ_1" | "SINGLE_AZ_2" | "MULTI_AZ_1"), + deployment_type: ("SINGLE_AZ_1" | "SINGLE_AZ_2" | "SINGLE_AZ_HA_1" | "SINGLE_AZ_HA_2" | "MULTI_AZ_1"), throughput_capacity: ::Integer, weekly_maintenance_start_time: ::String?, disk_iops_configuration: { @@ -1186,7 +1186,8 @@ module Aws throughput_capacity: ::Integer?, add_route_table_ids: Array[::String]?, remove_route_table_ids: Array[::String]?, - throughput_capacity_per_ha_pair: ::Integer? + throughput_capacity_per_ha_pair: ::Integer?, + ha_pairs: ::Integer? 
}, ?open_zfs_configuration: { automatic_backup_retention_days: ::Integer?, diff --git a/gems/aws-sdk-fsx/sig/types.rbs b/gems/aws-sdk-fsx/sig/types.rbs index 26159d65da2..4f275791584 100644 --- a/gems/aws-sdk-fsx/sig/types.rbs +++ b/gems/aws-sdk-fsx/sig/types.rbs @@ -23,10 +23,10 @@ module Aws::FSx end class AdministrativeAction - attr_accessor administrative_action_type: ("FILE_SYSTEM_UPDATE" | "STORAGE_OPTIMIZATION" | "FILE_SYSTEM_ALIAS_ASSOCIATION" | "FILE_SYSTEM_ALIAS_DISASSOCIATION" | "VOLUME_UPDATE" | "SNAPSHOT_UPDATE" | "RELEASE_NFS_V3_LOCKS" | "VOLUME_RESTORE" | "THROUGHPUT_OPTIMIZATION" | "IOPS_OPTIMIZATION" | "STORAGE_TYPE_OPTIMIZATION" | "MISCONFIGURED_STATE_RECOVERY" | "VOLUME_UPDATE_WITH_SNAPSHOT" | "VOLUME_INITIALIZE_WITH_SNAPSHOT") + attr_accessor administrative_action_type: ("FILE_SYSTEM_UPDATE" | "STORAGE_OPTIMIZATION" | "FILE_SYSTEM_ALIAS_ASSOCIATION" | "FILE_SYSTEM_ALIAS_DISASSOCIATION" | "VOLUME_UPDATE" | "SNAPSHOT_UPDATE" | "RELEASE_NFS_V3_LOCKS" | "VOLUME_RESTORE" | "THROUGHPUT_OPTIMIZATION" | "IOPS_OPTIMIZATION" | "STORAGE_TYPE_OPTIMIZATION" | "MISCONFIGURED_STATE_RECOVERY" | "VOLUME_UPDATE_WITH_SNAPSHOT" | "VOLUME_INITIALIZE_WITH_SNAPSHOT" | "DOWNLOAD_DATA_FROM_BACKUP") attr_accessor progress_percent: ::Integer attr_accessor request_time: ::Time - attr_accessor status: ("FAILED" | "IN_PROGRESS" | "PENDING" | "COMPLETED" | "UPDATED_OPTIMIZING") + attr_accessor status: ("FAILED" | "IN_PROGRESS" | "PENDING" | "COMPLETED" | "UPDATED_OPTIMIZING" | "OPTIMIZING") attr_accessor target_file_system_values: Types::FileSystem attr_accessor failure_details: Types::AdministrativeActionFailureDetails attr_accessor target_volume_values: Types::Volume @@ -313,7 +313,7 @@ module Aws::FSx class CreateFileSystemOntapConfiguration attr_accessor automatic_backup_retention_days: ::Integer attr_accessor daily_automatic_backup_start_time: ::String - attr_accessor deployment_type: ("MULTI_AZ_1" | "SINGLE_AZ_1" | "SINGLE_AZ_2") + attr_accessor deployment_type: ("MULTI_AZ_1" | "SINGLE_AZ_1" | "SINGLE_AZ_2" | "MULTI_AZ_2") attr_accessor endpoint_ip_address_range: ::String attr_accessor fsx_admin_password: ::String attr_accessor disk_iops_configuration: Types::DiskIopsConfiguration @@ -331,7 +331,7 @@ module Aws::FSx attr_accessor copy_tags_to_backups: bool attr_accessor copy_tags_to_volumes: bool attr_accessor daily_automatic_backup_start_time: ::String - attr_accessor deployment_type: ("SINGLE_AZ_1" | "SINGLE_AZ_2" | "MULTI_AZ_1") + attr_accessor deployment_type: ("SINGLE_AZ_1" | "SINGLE_AZ_2" | "SINGLE_AZ_HA_1" | "SINGLE_AZ_HA_2" | "MULTI_AZ_1") attr_accessor throughput_capacity: ::Integer attr_accessor weekly_maintenance_start_time: ::String attr_accessor disk_iops_configuration: Types::DiskIopsConfiguration @@ -1176,7 +1176,7 @@ module Aws::FSx class OntapFileSystemConfiguration attr_accessor automatic_backup_retention_days: ::Integer attr_accessor daily_automatic_backup_start_time: ::String - attr_accessor deployment_type: ("MULTI_AZ_1" | "SINGLE_AZ_1" | "SINGLE_AZ_2") + attr_accessor deployment_type: ("MULTI_AZ_1" | "SINGLE_AZ_1" | "SINGLE_AZ_2" | "MULTI_AZ_2") attr_accessor endpoint_ip_address_range: ::String attr_accessor endpoints: Types::FileSystemEndpoints attr_accessor disk_iops_configuration: Types::DiskIopsConfiguration @@ -1231,7 +1231,7 @@ module Aws::FSx attr_accessor copy_tags_to_backups: bool attr_accessor copy_tags_to_volumes: bool attr_accessor daily_automatic_backup_start_time: ::String - attr_accessor deployment_type: ("SINGLE_AZ_1" | "SINGLE_AZ_2" | "MULTI_AZ_1") + 
attr_accessor deployment_type: ("SINGLE_AZ_1" | "SINGLE_AZ_2" | "SINGLE_AZ_HA_1" | "SINGLE_AZ_HA_2" | "MULTI_AZ_1") attr_accessor throughput_capacity: ::Integer attr_accessor weekly_maintenance_start_time: ::String attr_accessor disk_iops_configuration: Types::DiskIopsConfiguration @@ -1573,6 +1573,7 @@ module Aws::FSx attr_accessor add_route_table_ids: ::Array[::String] attr_accessor remove_route_table_ids: ::Array[::String] attr_accessor throughput_capacity_per_ha_pair: ::Integer + attr_accessor ha_pairs: ::Integer SENSITIVE: [:fsx_admin_password] end diff --git a/gems/aws-sdk-opensearchservice/CHANGELOG.md b/gems/aws-sdk-opensearchservice/CHANGELOG.md index 72a20f459da..f639e6afc12 100644 --- a/gems/aws-sdk-opensearchservice/CHANGELOG.md +++ b/gems/aws-sdk-opensearchservice/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.50.0 (2024-07-09) +------------------ + +* Feature - This release adds support for enabling or disabling Natural Language Query Processing feature for Amazon OpenSearch Service domains, and provides visibility into the current state of the setup or tear-down. + 1.49.0 (2024-07-02) ------------------ diff --git a/gems/aws-sdk-opensearchservice/VERSION b/gems/aws-sdk-opensearchservice/VERSION index 7f3a46a841e..5a5c7211dc6 100644 --- a/gems/aws-sdk-opensearchservice/VERSION +++ b/gems/aws-sdk-opensearchservice/VERSION @@ -1 +1 @@ -1.49.0 +1.50.0 diff --git a/gems/aws-sdk-opensearchservice/lib/aws-sdk-opensearchservice.rb b/gems/aws-sdk-opensearchservice/lib/aws-sdk-opensearchservice.rb index b807f89f4f7..c3942f0209f 100644 --- a/gems/aws-sdk-opensearchservice/lib/aws-sdk-opensearchservice.rb +++ b/gems/aws-sdk-opensearchservice/lib/aws-sdk-opensearchservice.rb @@ -52,6 +52,6 @@ # @!group service module Aws::OpenSearchService - GEM_VERSION = '1.49.0' + GEM_VERSION = '1.50.0' end diff --git a/gems/aws-sdk-opensearchservice/lib/aws-sdk-opensearchservice/client.rb b/gems/aws-sdk-opensearchservice/lib/aws-sdk-opensearchservice/client.rb index 4f5045a8ac5..4a2f57c7777 100644 --- a/gems/aws-sdk-opensearchservice/lib/aws-sdk-opensearchservice/client.rb +++ b/gems/aws-sdk-opensearchservice/lib/aws-sdk-opensearchservice/client.rb @@ -856,6 +856,9 @@ def cancel_service_software_update(params = {}, options = {}) # @option params [Types::SoftwareUpdateOptions] :software_update_options # Software update options for the domain. # + # @option params [Types::AIMLOptionsInput] :aiml_options + # Options for all machine learning features for the specified domain. 
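The new `aiml_options` request parameter on `create_domain` can be exercised as below. A minimal sketch assuming a placeholder domain name and engine version (this patch does not pin the feature to a particular OpenSearch version, so availability may depend on the version chosen):

    require "aws-sdk-opensearchservice"

    client = Aws::OpenSearchService::Client.new(region: "us-east-1")

    # Request natural language query generation at domain creation.
    # Domain name and engine version are placeholders.
    resp = client.create_domain(
      domain_name: "my-search-domain",
      engine_version: "OpenSearch_2.13",
      aiml_options: {
        natural_language_query_generation_options: {
          desired_state: "ENABLED",
        },
      },
    )

    nlq = resp.domain_status.aiml_options.natural_language_query_generation_options
    puts nlq.current_state # e.g. "ENABLE_IN_PROGRESS"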
+ # # @return [Types::CreateDomainResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::CreateDomainResponse#domain_status #domain_status} => Types::DomainStatus @@ -988,6 +991,11 @@ def cancel_service_software_update(params = {}, options = {}) # software_update_options: { # auto_software_update_enabled: false, # }, + # aiml_options: { + # natural_language_query_generation_options: { + # desired_state: "ENABLED", # accepts ENABLED, DISABLED + # }, + # }, # }) # # @example Response structure @@ -1090,6 +1098,8 @@ def cancel_service_software_update(params = {}, options = {}) # resp.domain_status.modifying_properties[0].active_value #=> String # resp.domain_status.modifying_properties[0].pending_value #=> String # resp.domain_status.modifying_properties[0].value_type #=> String, one of "PLAIN_TEXT", "STRINGIFIED_JSON" + # resp.domain_status.aiml_options.natural_language_query_generation_options.desired_state #=> String, one of "ENABLED", "DISABLED" + # resp.domain_status.aiml_options.natural_language_query_generation_options.current_state #=> String, one of "NOT_ENABLED", "ENABLE_COMPLETE", "ENABLE_IN_PROGRESS", "ENABLE_FAILED", "DISABLE_COMPLETE", "DISABLE_IN_PROGRESS", "DISABLE_FAILED" # # @see http://docs.aws.amazon.com/goto/WebAPI/opensearch-2021-01-01/CreateDomain AWS API Documentation # @@ -1455,6 +1465,8 @@ def delete_data_source(params = {}, options = {}) # resp.domain_status.modifying_properties[0].active_value #=> String # resp.domain_status.modifying_properties[0].pending_value #=> String # resp.domain_status.modifying_properties[0].value_type #=> String, one of "PLAIN_TEXT", "STRINGIFIED_JSON" + # resp.domain_status.aiml_options.natural_language_query_generation_options.desired_state #=> String, one of "ENABLED", "DISABLED" + # resp.domain_status.aiml_options.natural_language_query_generation_options.current_state #=> String, one of "NOT_ENABLED", "ENABLE_COMPLETE", "ENABLE_IN_PROGRESS", "ENABLE_FAILED", "DISABLE_COMPLETE", "DISABLE_IN_PROGRESS", "DISABLE_FAILED" # # @see http://docs.aws.amazon.com/goto/WebAPI/opensearch-2021-01-01/DeleteDomain AWS API Documentation # @@ -1753,6 +1765,8 @@ def delete_vpc_endpoint(params = {}, options = {}) # resp.domain_status.modifying_properties[0].active_value #=> String # resp.domain_status.modifying_properties[0].pending_value #=> String # resp.domain_status.modifying_properties[0].value_type #=> String, one of "PLAIN_TEXT", "STRINGIFIED_JSON" + # resp.domain_status.aiml_options.natural_language_query_generation_options.desired_state #=> String, one of "ENABLED", "DISABLED" + # resp.domain_status.aiml_options.natural_language_query_generation_options.current_state #=> String, one of "NOT_ENABLED", "ENABLE_COMPLETE", "ENABLE_IN_PROGRESS", "ENABLE_FAILED", "DISABLE_COMPLETE", "DISABLE_IN_PROGRESS", "DISABLE_FAILED" # # @see http://docs.aws.amazon.com/goto/WebAPI/opensearch-2021-01-01/DescribeDomain AWS API Documentation # @@ -2059,6 +2073,13 @@ def describe_domain_change_progress(params = {}, options = {}) # resp.domain_config.modifying_properties[0].active_value #=> String # resp.domain_config.modifying_properties[0].pending_value #=> String # resp.domain_config.modifying_properties[0].value_type #=> String, one of "PLAIN_TEXT", "STRINGIFIED_JSON" + # resp.domain_config.aiml_options.options.natural_language_query_generation_options.desired_state #=> String, one of "ENABLED", "DISABLED" + # 
resp.domain_config.aiml_options.options.natural_language_query_generation_options.current_state #=> String, one of "NOT_ENABLED", "ENABLE_COMPLETE", "ENABLE_IN_PROGRESS", "ENABLE_FAILED", "DISABLE_COMPLETE", "DISABLE_IN_PROGRESS", "DISABLE_FAILED" + # resp.domain_config.aiml_options.status.creation_date #=> Time + # resp.domain_config.aiml_options.status.update_date #=> Time + # resp.domain_config.aiml_options.status.update_version #=> Integer + # resp.domain_config.aiml_options.status.state #=> String, one of "RequiresIndexDocuments", "Processing", "Active" + # resp.domain_config.aiml_options.status.pending_deletion #=> Boolean # # @see http://docs.aws.amazon.com/goto/WebAPI/opensearch-2021-01-01/DescribeDomainConfig AWS API Documentation # @@ -2286,6 +2307,8 @@ def describe_domain_nodes(params = {}, options = {}) # resp.domain_status_list[0].modifying_properties[0].active_value #=> String # resp.domain_status_list[0].modifying_properties[0].pending_value #=> String # resp.domain_status_list[0].modifying_properties[0].value_type #=> String, one of "PLAIN_TEXT", "STRINGIFIED_JSON" + # resp.domain_status_list[0].aiml_options.natural_language_query_generation_options.desired_state #=> String, one of "ENABLED", "DISABLED" + # resp.domain_status_list[0].aiml_options.natural_language_query_generation_options.current_state #=> String, one of "NOT_ENABLED", "ENABLE_COMPLETE", "ENABLE_IN_PROGRESS", "ENABLE_FAILED", "DISABLE_COMPLETE", "DISABLE_IN_PROGRESS", "DISABLE_FAILED" # # @see http://docs.aws.amazon.com/goto/WebAPI/opensearch-2021-01-01/DescribeDomains AWS API Documentation # @@ -2436,6 +2459,8 @@ def describe_domains(params = {}, options = {}) # resp.dry_run_config.modifying_properties[0].active_value #=> String # resp.dry_run_config.modifying_properties[0].pending_value #=> String # resp.dry_run_config.modifying_properties[0].value_type #=> String, one of "PLAIN_TEXT", "STRINGIFIED_JSON" + # resp.dry_run_config.aiml_options.natural_language_query_generation_options.desired_state #=> String, one of "ENABLED", "DISABLED" + # resp.dry_run_config.aiml_options.natural_language_query_generation_options.current_state #=> String, one of "NOT_ENABLED", "ENABLE_COMPLETE", "ENABLE_IN_PROGRESS", "ENABLE_FAILED", "DISABLE_COMPLETE", "DISABLE_IN_PROGRESS", "DISABLE_FAILED" # resp.dry_run_results.deployment_type #=> String # resp.dry_run_results.message #=> String # @@ -4179,6 +4204,9 @@ def update_data_source(params = {}, options = {}) # @option params [Types::SoftwareUpdateOptions] :software_update_options # Service software update options for the domain. # + # @option params [Types::AIMLOptionsInput] :aiml_options + # Options for all machine learning features for the specified domain. 
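The same `aiml_options` shape is accepted by `update_domain_config` to set up or tear down the feature on an existing domain. A sketch with a placeholder domain name; note the response carries both the requested (`desired_state`) and effective (`current_state`) values:

    require "aws-sdk-opensearchservice"

    client = Aws::OpenSearchService::Client.new(region: "us-east-1")

    # Tear down natural language query generation on an existing domain.
    resp = client.update_domain_config(
      domain_name: "my-search-domain", # placeholder
      aiml_options: {
        natural_language_query_generation_options: {
          desired_state: "DISABLED",
        },
      },
    )

    opts = resp.domain_config.aiml_options.options
    puts opts.natural_language_query_generation_options.current_state
    # e.g. "DISABLE_IN_PROGRESS"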
+ # # @return [Types::UpdateDomainConfigResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::UpdateDomainConfigResponse#domain_config #domain_config} => Types::DomainConfig @@ -4309,6 +4337,11 @@ def update_data_source(params = {}, options = {}) # software_update_options: { # auto_software_update_enabled: false, # }, + # aiml_options: { + # natural_language_query_generation_options: { + # desired_state: "ENABLED", # accepts ENABLED, DISABLED + # }, + # }, # }) # # @example Response structure @@ -4481,6 +4514,13 @@ def update_data_source(params = {}, options = {}) # resp.domain_config.modifying_properties[0].active_value #=> String # resp.domain_config.modifying_properties[0].pending_value #=> String # resp.domain_config.modifying_properties[0].value_type #=> String, one of "PLAIN_TEXT", "STRINGIFIED_JSON" + # resp.domain_config.aiml_options.options.natural_language_query_generation_options.desired_state #=> String, one of "ENABLED", "DISABLED" + # resp.domain_config.aiml_options.options.natural_language_query_generation_options.current_state #=> String, one of "NOT_ENABLED", "ENABLE_COMPLETE", "ENABLE_IN_PROGRESS", "ENABLE_FAILED", "DISABLE_COMPLETE", "DISABLE_IN_PROGRESS", "DISABLE_FAILED" + # resp.domain_config.aiml_options.status.creation_date #=> Time + # resp.domain_config.aiml_options.status.update_date #=> Time + # resp.domain_config.aiml_options.status.update_version #=> Integer + # resp.domain_config.aiml_options.status.state #=> String, one of "RequiresIndexDocuments", "Processing", "Active" + # resp.domain_config.aiml_options.status.pending_deletion #=> Boolean # resp.dry_run_results.deployment_type #=> String # resp.dry_run_results.message #=> String # resp.dry_run_progress_status.dry_run_id #=> String @@ -4774,7 +4814,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-opensearchservice' - context[:gem_version] = '1.49.0' + context[:gem_version] = '1.50.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-opensearchservice/lib/aws-sdk-opensearchservice/client_api.rb b/gems/aws-sdk-opensearchservice/lib/aws-sdk-opensearchservice/client_api.rb index 73c2a7b5ad0..dc5e1bd820e 100644 --- a/gems/aws-sdk-opensearchservice/lib/aws-sdk-opensearchservice/client_api.rb +++ b/gems/aws-sdk-opensearchservice/lib/aws-sdk-opensearchservice/client_api.rb @@ -13,6 +13,9 @@ module ClientApi include Seahorse::Model + AIMLOptionsInput = Shapes::StructureShape.new(name: 'AIMLOptionsInput') + AIMLOptionsOutput = Shapes::StructureShape.new(name: 'AIMLOptionsOutput') + AIMLOptionsStatus = Shapes::StructureShape.new(name: 'AIMLOptionsStatus') ARN = Shapes::StringShape.new(name: 'ARN') AWSAccount = Shapes::StringShape.new(name: 'AWSAccount') AWSDomainInformation = Shapes::StructureShape.new(name: 'AWSDomainInformation') @@ -294,6 +297,10 @@ module ClientApi MinimumInstanceCount = Shapes::IntegerShape.new(name: 'MinimumInstanceCount') ModifyingProperties = Shapes::StructureShape.new(name: 'ModifyingProperties') ModifyingPropertiesList = Shapes::ListShape.new(name: 'ModifyingPropertiesList') + NaturalLanguageQueryGenerationCurrentState = Shapes::StringShape.new(name: 'NaturalLanguageQueryGenerationCurrentState') + NaturalLanguageQueryGenerationDesiredState = Shapes::StringShape.new(name: 'NaturalLanguageQueryGenerationDesiredState') + NaturalLanguageQueryGenerationOptionsInput = Shapes::StructureShape.new(name: 
'NaturalLanguageQueryGenerationOptionsInput') + NaturalLanguageQueryGenerationOptionsOutput = Shapes::StructureShape.new(name: 'NaturalLanguageQueryGenerationOptionsOutput') NextToken = Shapes::StringShape.new(name: 'NextToken') NodeId = Shapes::StringShape.new(name: 'NodeId') NodeStatus = Shapes::StringShape.new(name: 'NodeStatus') @@ -459,6 +466,16 @@ module ClientApi ZoneAwarenessConfig = Shapes::StructureShape.new(name: 'ZoneAwarenessConfig') ZoneStatus = Shapes::StringShape.new(name: 'ZoneStatus') + AIMLOptionsInput.add_member(:natural_language_query_generation_options, Shapes::ShapeRef.new(shape: NaturalLanguageQueryGenerationOptionsInput, location_name: "NaturalLanguageQueryGenerationOptions")) + AIMLOptionsInput.struct_class = Types::AIMLOptionsInput + + AIMLOptionsOutput.add_member(:natural_language_query_generation_options, Shapes::ShapeRef.new(shape: NaturalLanguageQueryGenerationOptionsOutput, location_name: "NaturalLanguageQueryGenerationOptions")) + AIMLOptionsOutput.struct_class = Types::AIMLOptionsOutput + + AIMLOptionsStatus.add_member(:options, Shapes::ShapeRef.new(shape: AIMLOptionsOutput, location_name: "Options")) + AIMLOptionsStatus.add_member(:status, Shapes::ShapeRef.new(shape: OptionStatus, location_name: "Status")) + AIMLOptionsStatus.struct_class = Types::AIMLOptionsStatus + AWSDomainInformation.add_member(:owner_id, Shapes::ShapeRef.new(shape: OwnerId, location_name: "OwnerId")) AWSDomainInformation.add_member(:domain_name, Shapes::ShapeRef.new(shape: DomainName, required: true, location_name: "DomainName")) AWSDomainInformation.add_member(:region, Shapes::ShapeRef.new(shape: Region, location_name: "Region")) @@ -713,6 +730,7 @@ module ClientApi CreateDomainRequest.add_member(:auto_tune_options, Shapes::ShapeRef.new(shape: AutoTuneOptionsInput, location_name: "AutoTuneOptions")) CreateDomainRequest.add_member(:off_peak_window_options, Shapes::ShapeRef.new(shape: OffPeakWindowOptions, location_name: "OffPeakWindowOptions")) CreateDomainRequest.add_member(:software_update_options, Shapes::ShapeRef.new(shape: SoftwareUpdateOptions, location_name: "SoftwareUpdateOptions")) + CreateDomainRequest.add_member(:aiml_options, Shapes::ShapeRef.new(shape: AIMLOptionsInput, location_name: "AIMLOptions")) CreateDomainRequest.struct_class = Types::CreateDomainRequest CreateDomainResponse.add_member(:domain_status, Shapes::ShapeRef.new(shape: DomainStatus, location_name: "DomainStatus")) @@ -971,6 +989,7 @@ module ClientApi DomainConfig.add_member(:off_peak_window_options, Shapes::ShapeRef.new(shape: OffPeakWindowOptionsStatus, location_name: "OffPeakWindowOptions")) DomainConfig.add_member(:software_update_options, Shapes::ShapeRef.new(shape: SoftwareUpdateOptionsStatus, location_name: "SoftwareUpdateOptions")) DomainConfig.add_member(:modifying_properties, Shapes::ShapeRef.new(shape: ModifyingPropertiesList, location_name: "ModifyingProperties")) + DomainConfig.add_member(:aiml_options, Shapes::ShapeRef.new(shape: AIMLOptionsStatus, location_name: "AIMLOptions")) DomainConfig.struct_class = Types::DomainConfig DomainEndpointOptions.add_member(:enforce_https, Shapes::ShapeRef.new(shape: Boolean, location_name: "EnforceHTTPS")) @@ -1064,6 +1083,7 @@ module ClientApi DomainStatus.add_member(:software_update_options, Shapes::ShapeRef.new(shape: SoftwareUpdateOptions, location_name: "SoftwareUpdateOptions")) DomainStatus.add_member(:domain_processing_status, Shapes::ShapeRef.new(shape: DomainProcessingStatusType, location_name: "DomainProcessingStatus")) 
DomainStatus.add_member(:modifying_properties, Shapes::ShapeRef.new(shape: ModifyingPropertiesList, location_name: "ModifyingProperties")) + DomainStatus.add_member(:aiml_options, Shapes::ShapeRef.new(shape: AIMLOptionsOutput, location_name: "AIMLOptions")) DomainStatus.struct_class = Types::DomainStatus DomainStatusList.member = Shapes::ShapeRef.new(shape: DomainStatus) @@ -1370,6 +1390,13 @@ module ClientApi ModifyingPropertiesList.member = Shapes::ShapeRef.new(shape: ModifyingProperties) + NaturalLanguageQueryGenerationOptionsInput.add_member(:desired_state, Shapes::ShapeRef.new(shape: NaturalLanguageQueryGenerationDesiredState, location_name: "DesiredState")) + NaturalLanguageQueryGenerationOptionsInput.struct_class = Types::NaturalLanguageQueryGenerationOptionsInput + + NaturalLanguageQueryGenerationOptionsOutput.add_member(:desired_state, Shapes::ShapeRef.new(shape: NaturalLanguageQueryGenerationDesiredState, location_name: "DesiredState")) + NaturalLanguageQueryGenerationOptionsOutput.add_member(:current_state, Shapes::ShapeRef.new(shape: NaturalLanguageQueryGenerationCurrentState, location_name: "CurrentState")) + NaturalLanguageQueryGenerationOptionsOutput.struct_class = Types::NaturalLanguageQueryGenerationOptionsOutput + NodeToNodeEncryptionOptions.add_member(:enabled, Shapes::ShapeRef.new(shape: Boolean, location_name: "Enabled")) NodeToNodeEncryptionOptions.struct_class = Types::NodeToNodeEncryptionOptions @@ -1646,6 +1673,7 @@ module ClientApi UpdateDomainConfigRequest.add_member(:dry_run_mode, Shapes::ShapeRef.new(shape: DryRunMode, location_name: "DryRunMode")) UpdateDomainConfigRequest.add_member(:off_peak_window_options, Shapes::ShapeRef.new(shape: OffPeakWindowOptions, location_name: "OffPeakWindowOptions")) UpdateDomainConfigRequest.add_member(:software_update_options, Shapes::ShapeRef.new(shape: SoftwareUpdateOptions, location_name: "SoftwareUpdateOptions")) + UpdateDomainConfigRequest.add_member(:aiml_options, Shapes::ShapeRef.new(shape: AIMLOptionsInput, location_name: "AIMLOptions")) UpdateDomainConfigRequest.struct_class = Types::UpdateDomainConfigRequest UpdateDomainConfigResponse.add_member(:domain_config, Shapes::ShapeRef.new(shape: DomainConfig, required: true, location_name: "DomainConfig")) diff --git a/gems/aws-sdk-opensearchservice/lib/aws-sdk-opensearchservice/types.rb b/gems/aws-sdk-opensearchservice/lib/aws-sdk-opensearchservice/types.rb index 6b64f1b8732..6743c5fb147 100644 --- a/gems/aws-sdk-opensearchservice/lib/aws-sdk-opensearchservice/types.rb +++ b/gems/aws-sdk-opensearchservice/lib/aws-sdk-opensearchservice/types.rb @@ -10,6 +10,57 @@ module Aws::OpenSearchService module Types + # Container for parameters required to enable all machine learning + # features. + # + # @!attribute [rw] natural_language_query_generation_options + # Container for parameters required for natural language query + # generation on the specified domain. + # @return [Types::NaturalLanguageQueryGenerationOptionsInput] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/opensearch-2021-01-01/AIMLOptionsInput AWS API Documentation + # + class AIMLOptionsInput < Struct.new( + :natural_language_query_generation_options) + SENSITIVE = [] + include Aws::Structure + end + + # Container for parameters representing the state of machine learning + # features on the specified domain. + # + # @!attribute [rw] natural_language_query_generation_options + # Container for parameters required for natural language query + # generation on the specified domain. 
+ # @return [Types::NaturalLanguageQueryGenerationOptionsOutput]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/opensearch-2021-01-01/AIMLOptionsOutput AWS API Documentation
+ #
+ class AIMLOptionsOutput < Struct.new(
+ :natural_language_query_generation_options)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # The status of machine learning options on the specified domain.
+ #
+ # @!attribute [rw] options
+ # Machine learning options on the specified domain.
+ # @return [Types::AIMLOptionsOutput]
+ #
+ # @!attribute [rw] status
+ # Provides the current status of an entity.
+ # @return [Types::OptionStatus]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/opensearch-2021-01-01/AIMLOptionsStatus AWS API Documentation
+ #
+ class AIMLOptionsStatus < Struct.new(
+ :options,
+ :status)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
 # Information about an Amazon OpenSearch Service domain.
 #
 # @!attribute [rw] owner_id
@@ -1394,6 +1445,10 @@ class ConnectionProperties < Struct.new(
 # Software update options for the domain.
 # @return [Types::SoftwareUpdateOptions]
 #
+ # @!attribute [rw] aiml_options
+ # Options for all machine learning features for the specified domain.
+ # @return [Types::AIMLOptionsInput]
+ #
 # @see http://docs.aws.amazon.com/goto/WebAPI/opensearch-2021-01-01/CreateDomainRequest AWS API Documentation
 #
 class CreateDomainRequest < Struct.new(
@@ -1415,7 +1470,8 @@ class CreateDomainRequest < Struct.new(
 :tag_list,
 :auto_tune_options,
 :off_peak_window_options,
- :software_update_options)
+ :software_update_options,
+ :aiml_options)
 SENSITIVE = []
 include Aws::Structure
 end
@@ -2715,6 +2771,11 @@ class DissociatePackageResponse < Struct.new(
 # modified.
 # @return [Array<Types::ModifyingProperties>]
 #
+ # @!attribute [rw] aiml_options
+ # Container for parameters required to enable all machine learning
+ # features.
+ # @return [Types::AIMLOptionsStatus]
+ #
 # @see http://docs.aws.amazon.com/goto/WebAPI/opensearch-2021-01-01/DomainConfig AWS API Documentation
 #
 class DomainConfig < Struct.new(
@@ -2736,7 +2797,8 @@ class DomainConfig < Struct.new(
 :change_progress_details,
 :off_peak_window_options,
 :software_update_options,
- :modifying_properties)
+ :modifying_properties,
+ :aiml_options)
 SENSITIVE = []
 include Aws::Structure
 end
@@ -3167,6 +3229,11 @@ class DomainPackageDetails < Struct.new(
 # modified.
 # @return [Array<Types::ModifyingProperties>]
 #
+ # @!attribute [rw] aiml_options
+ # Container for parameters required to enable all machine learning
+ # features.
+ # @return [Types::AIMLOptionsOutput]
+ #
 # @see http://docs.aws.amazon.com/goto/WebAPI/opensearch-2021-01-01/DomainStatus AWS API Documentation
 #
 class DomainStatus < Struct.new(
@@ -3201,7 +3268,8 @@ class DomainStatus < Struct.new(
 :off_peak_window_options,
 :software_update_options,
 :domain_processing_status,
- :modifying_properties)
+ :modifying_properties,
+ :aiml_options)
 SENSITIVE = []
 include Aws::Structure
 end
@@ -4696,6 +4764,44 @@ class ModifyingProperties < Struct.new(
 include Aws::Structure
 end
+ # Container for parameters required to enable the natural language query
+ # generation feature.
+ #
+ # @!attribute [rw] desired_state
+ # The desired state of the natural language query generation feature.
+ # Valid values are ENABLED and DISABLED.
+ # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/opensearch-2021-01-01/NaturalLanguageQueryGenerationOptionsInput AWS API Documentation + # + class NaturalLanguageQueryGenerationOptionsInput < Struct.new( + :desired_state) + SENSITIVE = [] + include Aws::Structure + end + + # Container for parameters representing the state of the natural + # language query generation feature on the specified domain. + # + # @!attribute [rw] desired_state + # The desired state of the natural language query generation feature. + # Valid values are ENABLED and DISABLED. + # @return [String] + # + # @!attribute [rw] current_state + # The current state of the natural language query generation feature, + # indicating completion, in progress, or failure. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/opensearch-2021-01-01/NaturalLanguageQueryGenerationOptionsOutput AWS API Documentation + # + class NaturalLanguageQueryGenerationOptionsOutput < Struct.new( + :desired_state, + :current_state) + SENSITIVE = [] + include Aws::Structure + end + # Enables or disables node-to-node encryption. For more information, see # [Node-to-node encryption for Amazon OpenSearch Service][1]. # @@ -6115,6 +6221,10 @@ class UpdateDataSourceResponse < Struct.new( # Service software update options for the domain. # @return [Types::SoftwareUpdateOptions] # + # @!attribute [rw] aiml_options + # Options for all machine learning features for the specified domain. + # @return [Types::AIMLOptionsInput] + # # @see http://docs.aws.amazon.com/goto/WebAPI/opensearch-2021-01-01/UpdateDomainConfigRequest AWS API Documentation # class UpdateDomainConfigRequest < Struct.new( @@ -6136,7 +6246,8 @@ class UpdateDomainConfigRequest < Struct.new( :dry_run, :dry_run_mode, :off_peak_window_options, - :software_update_options) + :software_update_options, + :aiml_options) SENSITIVE = [] include Aws::Structure end diff --git a/gems/aws-sdk-opensearchservice/sig/client.rbs b/gems/aws-sdk-opensearchservice/sig/client.rbs index f1f95223ec4..0e3a491c5f0 100644 --- a/gems/aws-sdk-opensearchservice/sig/client.rbs +++ b/gems/aws-sdk-opensearchservice/sig/client.rbs @@ -283,6 +283,11 @@ module Aws }, ?software_update_options: { auto_software_update_enabled: bool? + }, + ?aiml_options: { + natural_language_query_generation_options: { + desired_state: ("ENABLED" | "DISABLED")? + }? } ) -> _CreateDomainResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _CreateDomainResponseSuccess @@ -1062,6 +1067,11 @@ module Aws }, ?software_update_options: { auto_software_update_enabled: bool? + }, + ?aiml_options: { + natural_language_query_generation_options: { + desired_state: ("ENABLED" | "DISABLED")? + }? 
}
 ) -> _UpdateDomainConfigResponseSuccess
 | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _UpdateDomainConfigResponseSuccess
diff --git a/gems/aws-sdk-opensearchservice/sig/types.rbs b/gems/aws-sdk-opensearchservice/sig/types.rbs
index 50fb9a0e66a..715d2d6a5c2 100644
--- a/gems/aws-sdk-opensearchservice/sig/types.rbs
+++ b/gems/aws-sdk-opensearchservice/sig/types.rbs
@@ -8,6 +8,22 @@ module Aws::OpenSearchService
 module Types
+ class AIMLOptionsInput
+ attr_accessor natural_language_query_generation_options: Types::NaturalLanguageQueryGenerationOptionsInput
+ SENSITIVE: []
+ end
+
+ class AIMLOptionsOutput
+ attr_accessor natural_language_query_generation_options: Types::NaturalLanguageQueryGenerationOptionsOutput
+ SENSITIVE: []
+ end
+
+ class AIMLOptionsStatus
+ attr_accessor options: Types::AIMLOptionsOutput
+ attr_accessor status: Types::OptionStatus
+ SENSITIVE: []
+ end
+
 class AWSDomainInformation
 attr_accessor owner_id: ::String
 attr_accessor domain_name: ::String
@@ -328,6 +344,7 @@ module Aws::OpenSearchService
 attr_accessor auto_tune_options: Types::AutoTuneOptionsInput
 attr_accessor off_peak_window_options: Types::OffPeakWindowOptions
 attr_accessor software_update_options: Types::SoftwareUpdateOptions
+ attr_accessor aiml_options: Types::AIMLOptionsInput
 SENSITIVE: []
 end
@@ -697,6 +714,7 @@ module Aws::OpenSearchService
 attr_accessor off_peak_window_options: Types::OffPeakWindowOptionsStatus
 attr_accessor software_update_options: Types::SoftwareUpdateOptionsStatus
 attr_accessor modifying_properties: ::Array[Types::ModifyingProperties]
+ attr_accessor aiml_options: Types::AIMLOptionsStatus
 SENSITIVE: []
 end
@@ -796,6 +814,7 @@ module Aws::OpenSearchService
 attr_accessor software_update_options: Types::SoftwareUpdateOptions
 attr_accessor domain_processing_status: ("Creating" | "Active" | "Modifying" | "UpgradingEngineVersion" | "UpdatingServiceSoftware" | "Isolated" | "Deleting")
 attr_accessor modifying_properties: ::Array[Types::ModifyingProperties]
+ attr_accessor aiml_options: Types::AIMLOptionsOutput
 SENSITIVE: []
 end
@@ -1196,6 +1215,17 @@ module Aws::OpenSearchService
 SENSITIVE: []
 end
+ class NaturalLanguageQueryGenerationOptionsInput
+ attr_accessor desired_state: ("ENABLED" | "DISABLED")
+ SENSITIVE: []
+ end
+
+ class NaturalLanguageQueryGenerationOptionsOutput
+ attr_accessor desired_state: ("ENABLED" | "DISABLED")
+ attr_accessor current_state: ("NOT_ENABLED" | "ENABLE_COMPLETE" | "ENABLE_IN_PROGRESS" | "ENABLE_FAILED" | "DISABLE_COMPLETE" | "DISABLE_IN_PROGRESS" | "DISABLE_FAILED")
+ SENSITIVE: []
+ end
+
 class NodeToNodeEncryptionOptions
 attr_accessor enabled: bool
 SENSITIVE: []
@@ -1536,6 +1566,7 @@ module Aws::OpenSearchService
 attr_accessor dry_run_mode: ("Basic" | "Verbose")
 attr_accessor off_peak_window_options: Types::OffPeakWindowOptions
 attr_accessor software_update_options: Types::SoftwareUpdateOptions
+ attr_accessor aiml_options: Types::AIMLOptionsInput
 SENSITIVE: []
 end
diff --git a/gems/aws-sdk-sagemaker/CHANGELOG.md b/gems/aws-sdk-sagemaker/CHANGELOG.md
index 5fe6b73b83a..e3237e239ae 100644
--- a/gems/aws-sdk-sagemaker/CHANGELOG.md
+++ b/gems/aws-sdk-sagemaker/CHANGELOG.md
@@ -1,6 +1,11 @@ Unreleased Changes
 ------------------
+1.253.0 (2024-07-09)
+------------------
+
+* Feature - This release 1/ enables optimization jobs that allow customers to perform ahead-of-time compilation and quantization. 2/ allows customers to control access to Amazon Q integration in SageMaker Studio.
3/ enables AdditionalModelDataSources for CreateModel action. + 1.252.0 (2024-07-02) ------------------ diff --git a/gems/aws-sdk-sagemaker/VERSION b/gems/aws-sdk-sagemaker/VERSION index 455aaf0cace..2dcfeac491b 100644 --- a/gems/aws-sdk-sagemaker/VERSION +++ b/gems/aws-sdk-sagemaker/VERSION @@ -1 +1 @@ -1.252.0 +1.253.0 diff --git a/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker.rb b/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker.rb index 0d7db8c4dc1..95985cb101a 100644 --- a/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker.rb +++ b/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker.rb @@ -53,6 +53,6 @@ # @!group service module Aws::SageMaker - GEM_VERSION = '1.252.0' + GEM_VERSION = '1.253.0' end diff --git a/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker/client.rb b/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker/client.rb index 1bc4434cd5f..9bf0cd43d3f 100644 --- a/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker/client.rb +++ b/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker/client.rb @@ -2688,6 +2688,10 @@ def create_device_fleet(params = {}, options = {}) # enable_docker_access: "ENABLED", # accepts ENABLED, DISABLED # vpc_only_trusted_accounts: ["AccountId"], # }, + # amazon_q_settings: { + # status: "ENABLED", # accepts ENABLED, DISABLED + # q_profile_arn: "QProfileArn", + # }, # }, # subnet_ids: ["SubnetId"], # required # vpc_id: "VpcId", # required @@ -5686,6 +5690,22 @@ def create_mlflow_tracking_server(params = {}, options = {}) # }, # }, # }, + # additional_model_data_sources: [ + # { + # channel_name: "AdditionalModelChannelName", # required + # s3_data_source: { # required + # s3_uri: "S3ModelUri", # required + # s3_data_type: "S3Prefix", # required, accepts S3Prefix, S3Object + # compression_type: "None", # required, accepts None, Gzip + # model_access_config: { + # accept_eula: false, # required + # }, + # hub_access_config: { + # hub_content_arn: "HubContentArn", # required + # }, + # }, + # }, + # ], # environment: { # "EnvironmentKey" => "EnvironmentValue", # }, @@ -5720,6 +5740,22 @@ def create_mlflow_tracking_server(params = {}, options = {}) # }, # }, # }, + # additional_model_data_sources: [ + # { + # channel_name: "AdditionalModelChannelName", # required + # s3_data_source: { # required + # s3_uri: "S3ModelUri", # required + # s3_data_type: "S3Prefix", # required, accepts S3Prefix, S3Object + # compression_type: "None", # required, accepts None, Gzip + # model_access_config: { + # accept_eula: false, # required + # }, + # hub_access_config: { + # hub_content_arn: "HubContentArn", # required + # }, + # }, + # }, + # ], # environment: { # "EnvironmentKey" => "EnvironmentValue", # }, @@ -7330,6 +7366,169 @@ def create_notebook_instance_lifecycle_config(params = {}, options = {}) req.send_request(options) end + # Creates a job that optimizes a model for inference performance. To + # create the job, you provide the location of a source model, and you + # provide the settings for the optimization techniques that you want the + # job to apply. When the job completes successfully, SageMaker uploads + # the new optimized model to the output destination that you specify. + # + # For more information about how to use this action, and about the + # supported optimization techniques, see [Optimize model inference with + # Amazon SageMaker][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/sagemaker/latest/dg/model-optimize.html + # + # @option params [required, String] :optimization_job_name + # A custom name for the new optimization job. 
+ #
+ # @option params [required, String] :role_arn
+ # The Amazon Resource Name (ARN) of an IAM role that enables Amazon
+ # SageMaker to perform tasks on your behalf.
+ #
+ # During model optimization, Amazon SageMaker needs your permission to:
+ #
+ # * Read input data from an S3 bucket
+ #
+ # * Write model artifacts to an S3 bucket
+ #
+ # * Write logs to Amazon CloudWatch Logs
+ #
+ # * Publish metrics to Amazon CloudWatch
+ #
+ # You grant permissions for all of these tasks to an IAM role. To pass
+ # this role to Amazon SageMaker, the caller of this API must have the
+ # `iam:PassRole` permission. For more information, see [Amazon SageMaker
+ # Roles.][1]
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html
+ #
+ # @option params [required, Types::OptimizationJobModelSource] :model_source
+ # The location of the source model to optimize with an optimization job.
+ #
+ # @option params [required, String] :deployment_instance_type
+ # The type of instance that hosts the optimized model that you create
+ # with the optimization job.
+ #
+ # @option params [Hash<String,String>] :optimization_environment
+ # The environment variables to set in the model container.
+ #
+ # @option params [required, Array<Types::OptimizationConfig>] :optimization_configs
+ # Settings for each of the optimization techniques that the job applies.
+ #
+ # @option params [required, Types::OptimizationJobOutputConfig] :output_config
+ # Details for where to store the optimized model that you create with
+ # the optimization job.
+ #
+ # @option params [required, Types::StoppingCondition] :stopping_condition
+ # Specifies a limit to how long a job can run. When the job reaches the
+ # time limit, SageMaker ends the job. Use this API to cap costs.
+ #
+ # To stop a training job, SageMaker sends the algorithm the `SIGTERM`
+ # signal, which delays job termination for 120 seconds. Algorithms can
+ # use this 120-second window to save the model artifacts, so the results
+ # of training are not lost.
+ #
+ # The training algorithms provided by SageMaker automatically save the
+ # intermediate results of a model training job when possible. This
+ # attempt to save artifacts is only a best-effort case, as the model
+ # might not be in a state from which it can be saved. For example, if
+ # training has just started, the model might not be ready to save. When
+ # saved, this intermediate data is a valid model artifact. You can use
+ # it to create a model with `CreateModel`.
+ #
+ # The Neural Topic Model (NTM) currently does not support saving
+ # intermediate model artifacts. When training NTMs, make sure that the
+ # maximum runtime is sufficient for the training job to complete.
+ #
+ #
+ #
+ # @option params [Array<Types::Tag>] :tags
+ # A list of key-value pairs associated with the optimization job. For
+ # more information, see [Tagging Amazon Web Services resources][1] in
+ # the *Amazon Web Services General Reference Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html
+ #
+ # @option params [Types::OptimizationVpcConfig] :vpc_config
+ # A VPC in Amazon VPC that your optimized model has access to.
+ # + # @return [Types::CreateOptimizationJobResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::CreateOptimizationJobResponse#optimization_job_arn #optimization_job_arn} => String + # + # @example Request syntax with placeholder values + # + # resp = client.create_optimization_job({ + # optimization_job_name: "EntityName", # required + # role_arn: "RoleArn", # required + # model_source: { # required + # s3: { + # s3_uri: "S3Uri", + # model_access_config: { + # accept_eula: false, # required + # }, + # }, + # }, + # deployment_instance_type: "ml.p4d.24xlarge", # required, accepts ml.p4d.24xlarge, ml.p4de.24xlarge, ml.p5.48xlarge, ml.g5.xlarge, ml.g5.2xlarge, ml.g5.4xlarge, ml.g5.8xlarge, ml.g5.12xlarge, ml.g5.16xlarge, ml.g5.24xlarge, ml.g5.48xlarge, ml.g6.xlarge, ml.g6.2xlarge, ml.g6.4xlarge, ml.g6.8xlarge, ml.g6.12xlarge, ml.g6.16xlarge, ml.g6.24xlarge, ml.g6.48xlarge, ml.inf2.xlarge, ml.inf2.8xlarge, ml.inf2.24xlarge, ml.inf2.48xlarge, ml.trn1.2xlarge, ml.trn1.32xlarge, ml.trn1n.32xlarge + # optimization_environment: { + # "NonEmptyString256" => "String256", + # }, + # optimization_configs: [ # required + # { + # model_quantization_config: { + # image: "OptimizationContainerImage", + # override_environment: { + # "NonEmptyString256" => "String256", + # }, + # }, + # model_compilation_config: { + # image: "OptimizationContainerImage", + # override_environment: { + # "NonEmptyString256" => "String256", + # }, + # }, + # }, + # ], + # output_config: { # required + # kms_key_id: "KmsKeyId", + # s3_output_location: "S3Uri", # required + # }, + # stopping_condition: { # required + # max_runtime_in_seconds: 1, + # max_wait_time_in_seconds: 1, + # max_pending_time_in_seconds: 1, + # }, + # tags: [ + # { + # key: "TagKey", # required + # value: "TagValue", # required + # }, + # ], + # vpc_config: { + # security_group_ids: ["OptimizationVpcSecurityGroupId"], # required + # subnets: ["OptimizationVpcSubnetId"], # required + # }, + # }) + # + # @example Response structure + # + # resp.optimization_job_arn #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateOptimizationJob AWS API Documentation + # + # @overload create_optimization_job(params = {}) + # @param [Hash] params ({}) + def create_optimization_job(params = {}, options = {}) + req = build_request(:create_optimization_job, params) + req.send_request(options) + end + # Creates a pipeline using a JSON pipeline definition. # # @option params [required, String] :pipeline_name @@ -10585,6 +10784,28 @@ def delete_notebook_instance_lifecycle_config(params = {}, options = {}) req.send_request(options) end + # Deletes an optimization job. + # + # @option params [required, String] :optimization_job_name + # The name that you assigned to the optimization job. + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # @example Request syntax with placeholder values + # + # resp = client.delete_optimization_job({ + # optimization_job_name: "EntityName", # required + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteOptimizationJob AWS API Documentation + # + # @overload delete_optimization_job(params = {}) + # @param [Hash] params ({}) + def delete_optimization_job(params = {}, options = {}) + req = build_request(:delete_optimization_job, params) + req.send_request(options) + end + # Deletes a pipeline if there are no running instances of the pipeline. 
# To delete a pipeline, you must stop all running instances of the # pipeline using the `StopPipelineExecution` API. When you delete a @@ -12326,6 +12547,8 @@ def describe_device_fleet(params = {}, options = {}) # resp.domain_settings.docker_settings.enable_docker_access #=> String, one of "ENABLED", "DISABLED" # resp.domain_settings.docker_settings.vpc_only_trusted_accounts #=> Array # resp.domain_settings.docker_settings.vpc_only_trusted_accounts[0] #=> String + # resp.domain_settings.amazon_q_settings.status #=> String, one of "ENABLED", "DISABLED" + # resp.domain_settings.amazon_q_settings.q_profile_arn #=> String # resp.app_network_access_type #=> String, one of "PublicInternetOnly", "VpcOnly" # resp.home_efs_file_system_kms_key_id #=> String # resp.subnet_ids #=> Array @@ -14219,6 +14442,13 @@ def describe_mlflow_tracking_server(params = {}, options = {}) # resp.primary_container.model_data_source.s3_data_source.compression_type #=> String, one of "None", "Gzip" # resp.primary_container.model_data_source.s3_data_source.model_access_config.accept_eula #=> Boolean # resp.primary_container.model_data_source.s3_data_source.hub_access_config.hub_content_arn #=> String + # resp.primary_container.additional_model_data_sources #=> Array + # resp.primary_container.additional_model_data_sources[0].channel_name #=> String + # resp.primary_container.additional_model_data_sources[0].s3_data_source.s3_uri #=> String + # resp.primary_container.additional_model_data_sources[0].s3_data_source.s3_data_type #=> String, one of "S3Prefix", "S3Object" + # resp.primary_container.additional_model_data_sources[0].s3_data_source.compression_type #=> String, one of "None", "Gzip" + # resp.primary_container.additional_model_data_sources[0].s3_data_source.model_access_config.accept_eula #=> Boolean + # resp.primary_container.additional_model_data_sources[0].s3_data_source.hub_access_config.hub_content_arn #=> String # resp.primary_container.environment #=> Hash # resp.primary_container.environment["EnvironmentKey"] #=> String # resp.primary_container.model_package_name #=> String @@ -14236,6 +14466,13 @@ def describe_mlflow_tracking_server(params = {}, options = {}) # resp.containers[0].model_data_source.s3_data_source.compression_type #=> String, one of "None", "Gzip" # resp.containers[0].model_data_source.s3_data_source.model_access_config.accept_eula #=> Boolean # resp.containers[0].model_data_source.s3_data_source.hub_access_config.hub_content_arn #=> String + # resp.containers[0].additional_model_data_sources #=> Array + # resp.containers[0].additional_model_data_sources[0].channel_name #=> String + # resp.containers[0].additional_model_data_sources[0].s3_data_source.s3_uri #=> String + # resp.containers[0].additional_model_data_sources[0].s3_data_source.s3_data_type #=> String, one of "S3Prefix", "S3Object" + # resp.containers[0].additional_model_data_sources[0].s3_data_source.compression_type #=> String, one of "None", "Gzip" + # resp.containers[0].additional_model_data_sources[0].s3_data_source.model_access_config.accept_eula #=> Boolean + # resp.containers[0].additional_model_data_sources[0].s3_data_source.hub_access_config.hub_content_arn #=> String # resp.containers[0].environment #=> Hash # resp.containers[0].environment["EnvironmentKey"] #=> String # resp.containers[0].model_package_name #=> String @@ -15189,6 +15426,80 @@ def describe_notebook_instance_lifecycle_config(params = {}, options = {}) req.send_request(options) end + # Provides the properties of the specified optimization job. 
+ # + # @option params [required, String] :optimization_job_name + # The name that you assigned to the optimization job. + # + # @return [Types::DescribeOptimizationJobResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::DescribeOptimizationJobResponse#optimization_job_arn #optimization_job_arn} => String + # * {Types::DescribeOptimizationJobResponse#optimization_job_status #optimization_job_status} => String + # * {Types::DescribeOptimizationJobResponse#optimization_start_time #optimization_start_time} => Time + # * {Types::DescribeOptimizationJobResponse#optimization_end_time #optimization_end_time} => Time + # * {Types::DescribeOptimizationJobResponse#creation_time #creation_time} => Time + # * {Types::DescribeOptimizationJobResponse#last_modified_time #last_modified_time} => Time + # * {Types::DescribeOptimizationJobResponse#failure_reason #failure_reason} => String + # * {Types::DescribeOptimizationJobResponse#optimization_job_name #optimization_job_name} => String + # * {Types::DescribeOptimizationJobResponse#model_source #model_source} => Types::OptimizationJobModelSource + # * {Types::DescribeOptimizationJobResponse#optimization_environment #optimization_environment} => Hash<String,String> + # * {Types::DescribeOptimizationJobResponse#deployment_instance_type #deployment_instance_type} => String + # * {Types::DescribeOptimizationJobResponse#optimization_configs #optimization_configs} => Array<Types::OptimizationConfig> + # * {Types::DescribeOptimizationJobResponse#output_config #output_config} => Types::OptimizationJobOutputConfig + # * {Types::DescribeOptimizationJobResponse#optimization_output #optimization_output} => Types::OptimizationOutput + # * {Types::DescribeOptimizationJobResponse#role_arn #role_arn} => String + # * {Types::DescribeOptimizationJobResponse#stopping_condition #stopping_condition} => Types::StoppingCondition + # * {Types::DescribeOptimizationJobResponse#vpc_config #vpc_config} => Types::OptimizationVpcConfig + # + # @example Request syntax with placeholder values + # + # resp = client.describe_optimization_job({ + # optimization_job_name: "EntityName", # required + # }) + # + # @example Response structure + # + # resp.optimization_job_arn #=> String + # resp.optimization_job_status #=> String, one of "INPROGRESS", "COMPLETED", "FAILED", "STARTING", "STOPPING", "STOPPED" + # resp.optimization_start_time #=> Time + # resp.optimization_end_time #=> Time + # resp.creation_time #=> Time + # resp.last_modified_time #=> Time + # resp.failure_reason #=> String + # resp.optimization_job_name #=> String + # resp.model_source.s3.s3_uri #=> String + # resp.model_source.s3.model_access_config.accept_eula #=> Boolean + # resp.optimization_environment #=> Hash + # resp.optimization_environment["NonEmptyString256"] #=> String + # resp.deployment_instance_type #=> String, one of "ml.p4d.24xlarge", "ml.p4de.24xlarge", "ml.p5.48xlarge", "ml.g5.xlarge", "ml.g5.2xlarge", "ml.g5.4xlarge", "ml.g5.8xlarge", "ml.g5.12xlarge", "ml.g5.16xlarge", "ml.g5.24xlarge", "ml.g5.48xlarge", "ml.g6.xlarge", "ml.g6.2xlarge", "ml.g6.4xlarge", "ml.g6.8xlarge", "ml.g6.12xlarge", "ml.g6.16xlarge", "ml.g6.24xlarge", "ml.g6.48xlarge", "ml.inf2.xlarge", "ml.inf2.8xlarge", "ml.inf2.24xlarge", "ml.inf2.48xlarge", "ml.trn1.2xlarge", "ml.trn1.32xlarge", "ml.trn1n.32xlarge" + # resp.optimization_configs #=> Array + # resp.optimization_configs[0].model_quantization_config.image #=> String + # 
resp.optimization_configs[0].model_quantization_config.override_environment #=> Hash + # resp.optimization_configs[0].model_quantization_config.override_environment["NonEmptyString256"] #=> String + # resp.optimization_configs[0].model_compilation_config.image #=> String + # resp.optimization_configs[0].model_compilation_config.override_environment #=> Hash + # resp.optimization_configs[0].model_compilation_config.override_environment["NonEmptyString256"] #=> String + # resp.output_config.kms_key_id #=> String + # resp.output_config.s3_output_location #=> String + # resp.optimization_output.recommended_inference_image #=> String + # resp.role_arn #=> String + # resp.stopping_condition.max_runtime_in_seconds #=> Integer + # resp.stopping_condition.max_wait_time_in_seconds #=> Integer + # resp.stopping_condition.max_pending_time_in_seconds #=> Integer + # resp.vpc_config.security_group_ids #=> Array + # resp.vpc_config.security_group_ids[0] #=> String + # resp.vpc_config.subnets #=> Array + # resp.vpc_config.subnets[0] #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeOptimizationJob AWS API Documentation + # + # @overload describe_optimization_job(params = {}) + # @param [Hash] params ({}) + def describe_optimization_job(params = {}, options = {}) + req = build_request(:describe_optimization_job, params) + req.send_request(options) + end + # Describes the details of a pipeline. # # @option params [required, String] :pipeline_name @@ -21274,6 +21585,100 @@ def list_notebook_instances(params = {}, options = {}) req.send_request(options) end + # Lists the optimization jobs in your account and their properties. + # + # @option params [String] :next_token + # A token that you use to get the next set of results following a + # truncated response. If the response to the previous request was + # truncated, that response provides the value for this token. + # + # @option params [Integer] :max_results + # The maximum number of optimization jobs to return in the response. The + # default is 50. + # + # @option params [Time,DateTime,Date,Integer,String] :creation_time_after + # Filters the results to only those optimization jobs that were created + # after the specified time. + # + # @option params [Time,DateTime,Date,Integer,String] :creation_time_before + # Filters the results to only those optimization jobs that were created + # before the specified time. + # + # @option params [Time,DateTime,Date,Integer,String] :last_modified_time_after + # Filters the results to only those optimization jobs that were updated + # after the specified time. + # + # @option params [Time,DateTime,Date,Integer,String] :last_modified_time_before + # Filters the results to only those optimization jobs that were updated + # before the specified time. + # + # @option params [String] :optimization_contains + # Filters the results to only those optimization jobs that apply the + # specified optimization techniques. You can specify either + # `Quantization` or `Compilation`. + # + # @option params [String] :name_contains + # Filters the results to only those optimization jobs with a name that + # contains the specified string. + # + # @option params [String] :status_equals + # Filters the results to only those optimization jobs with the specified + # status. + # + # @option params [String] :sort_by + # The field by which to sort the optimization jobs in the response. The + # default is `CreationTime` + # + # @option params [String] :sort_order + # The sort order for results. 
The default is `Ascending` + # + # @return [Types::ListOptimizationJobsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::ListOptimizationJobsResponse#optimization_job_summaries #optimization_job_summaries} => Array<Types::OptimizationJobSummary> + # * {Types::ListOptimizationJobsResponse#next_token #next_token} => String + # + # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}. + # + # @example Request syntax with placeholder values + # + # resp = client.list_optimization_jobs({ + # next_token: "NextToken", + # max_results: 1, + # creation_time_after: Time.now, + # creation_time_before: Time.now, + # last_modified_time_after: Time.now, + # last_modified_time_before: Time.now, + # optimization_contains: "NameContains", + # name_contains: "NameContains", + # status_equals: "INPROGRESS", # accepts INPROGRESS, COMPLETED, FAILED, STARTING, STOPPING, STOPPED + # sort_by: "Name", # accepts Name, CreationTime, Status + # sort_order: "Ascending", # accepts Ascending, Descending + # }) + # + # @example Response structure + # + # resp.optimization_job_summaries #=> Array + # resp.optimization_job_summaries[0].optimization_job_name #=> String + # resp.optimization_job_summaries[0].optimization_job_arn #=> String + # resp.optimization_job_summaries[0].creation_time #=> Time + # resp.optimization_job_summaries[0].optimization_job_status #=> String, one of "INPROGRESS", "COMPLETED", "FAILED", "STARTING", "STOPPING", "STOPPED" + # resp.optimization_job_summaries[0].optimization_start_time #=> Time + # resp.optimization_job_summaries[0].optimization_end_time #=> Time + # resp.optimization_job_summaries[0].last_modified_time #=> Time + # resp.optimization_job_summaries[0].deployment_instance_type #=> String, one of "ml.p4d.24xlarge", "ml.p4de.24xlarge", "ml.p5.48xlarge", "ml.g5.xlarge", "ml.g5.2xlarge", "ml.g5.4xlarge", "ml.g5.8xlarge", "ml.g5.12xlarge", "ml.g5.16xlarge", "ml.g5.24xlarge", "ml.g5.48xlarge", "ml.g6.xlarge", "ml.g6.2xlarge", "ml.g6.4xlarge", "ml.g6.8xlarge", "ml.g6.12xlarge", "ml.g6.16xlarge", "ml.g6.24xlarge", "ml.g6.48xlarge", "ml.inf2.xlarge", "ml.inf2.8xlarge", "ml.inf2.24xlarge", "ml.inf2.48xlarge", "ml.trn1.2xlarge", "ml.trn1.32xlarge", "ml.trn1n.32xlarge" + # resp.optimization_job_summaries[0].optimization_types #=> Array + # resp.optimization_job_summaries[0].optimization_types[0] #=> String + # resp.next_token #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListOptimizationJobs AWS API Documentation + # + # @overload list_optimization_jobs(params = {}) + # @param [Hash] params ({}) + def list_optimization_jobs(params = {}, options = {}) + req = build_request(:list_optimization_jobs, params) + req.send_request(options) + end + # Gets a list of `PipeLineExecutionStep` objects. # # @option params [String] :pipeline_execution_arn @@ -23793,6 +24198,28 @@ def stop_notebook_instance(params = {}, options = {}) req.send_request(options) end + # Ends a running inference optimization job. + # + # @option params [required, String] :optimization_job_name + # The name that you assigned to the optimization job. + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. 
+ # + # @example Request syntax with placeholder values + # + # resp = client.stop_optimization_job({ + # optimization_job_name: "EntityName", # required + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StopOptimizationJob AWS API Documentation + # + # @overload stop_optimization_job(params = {}) + # @param [Hash] params ({}) + def stop_optimization_job(params = {}, options = {}) + req = build_request(:stop_optimization_job, params) + req.send_request(options) + end + # Stops a pipeline execution. # # **Callback Step** @@ -24585,6 +25012,10 @@ def update_devices(params = {}, options = {}) # enable_docker_access: "ENABLED", # accepts ENABLED, DISABLED # vpc_only_trusted_accounts: ["AccountId"], # }, + # amazon_q_settings: { + # status: "ENABLED", # accepts ENABLED, DISABLED + # q_profile_arn: "QProfileArn", + # }, # }, # app_security_group_management: "Service", # accepts Service, Customer # default_space_settings: { @@ -27047,7 +27478,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-sagemaker' - context[:gem_version] = '1.252.0' + context[:gem_version] = '1.253.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker/client_api.rb b/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker/client_api.rb index 60347eb106f..a23b252781d 100644 --- a/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker/client_api.rb +++ b/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker/client_api.rb @@ -28,6 +28,9 @@ module ClientApi AdditionalCodeRepositoryNamesOrUrls = Shapes::ListShape.new(name: 'AdditionalCodeRepositoryNamesOrUrls') AdditionalInferenceSpecificationDefinition = Shapes::StructureShape.new(name: 'AdditionalInferenceSpecificationDefinition') AdditionalInferenceSpecifications = Shapes::ListShape.new(name: 'AdditionalInferenceSpecifications') + AdditionalModelChannelName = Shapes::StringShape.new(name: 'AdditionalModelChannelName') + AdditionalModelDataSource = Shapes::StructureShape.new(name: 'AdditionalModelDataSource') + AdditionalModelDataSources = Shapes::ListShape.new(name: 'AdditionalModelDataSources') AdditionalS3DataSource = Shapes::StructureShape.new(name: 'AdditionalS3DataSource') AdditionalS3DataSourceDataType = Shapes::StringShape.new(name: 'AdditionalS3DataSourceDataType') AgentVersion = Shapes::StructureShape.new(name: 'AgentVersion') @@ -50,6 +53,7 @@ module ClientApi AlgorithmValidationProfile = Shapes::StructureShape.new(name: 'AlgorithmValidationProfile') AlgorithmValidationProfiles = Shapes::ListShape.new(name: 'AlgorithmValidationProfiles') AlgorithmValidationSpecification = Shapes::StructureShape.new(name: 'AlgorithmValidationSpecification') + AmazonQSettings = Shapes::StructureShape.new(name: 'AmazonQSettings') AnnotationConsolidationConfig = Shapes::StructureShape.new(name: 'AnnotationConsolidationConfig') AppArn = Shapes::StringShape.new(name: 'AppArn') AppDetails = Shapes::StructureShape.new(name: 'AppDetails') @@ -432,6 +436,8 @@ module ClientApi CreateNotebookInstanceLifecycleConfigInput = Shapes::StructureShape.new(name: 'CreateNotebookInstanceLifecycleConfigInput') CreateNotebookInstanceLifecycleConfigOutput = Shapes::StructureShape.new(name: 'CreateNotebookInstanceLifecycleConfigOutput') CreateNotebookInstanceOutput = Shapes::StructureShape.new(name: 'CreateNotebookInstanceOutput') + CreateOptimizationJobRequest = Shapes::StructureShape.new(name: 'CreateOptimizationJobRequest') + CreateOptimizationJobResponse = 
Shapes::StructureShape.new(name: 'CreateOptimizationJobResponse') CreatePipelineRequest = Shapes::StructureShape.new(name: 'CreatePipelineRequest') CreatePipelineResponse = Shapes::StructureShape.new(name: 'CreatePipelineResponse') CreatePresignedDomainUrlRequest = Shapes::StructureShape.new(name: 'CreatePresignedDomainUrlRequest') @@ -558,6 +564,7 @@ module ClientApi DeleteMonitoringScheduleRequest = Shapes::StructureShape.new(name: 'DeleteMonitoringScheduleRequest') DeleteNotebookInstanceInput = Shapes::StructureShape.new(name: 'DeleteNotebookInstanceInput') DeleteNotebookInstanceLifecycleConfigInput = Shapes::StructureShape.new(name: 'DeleteNotebookInstanceLifecycleConfigInput') + DeleteOptimizationJobRequest = Shapes::StructureShape.new(name: 'DeleteOptimizationJobRequest') DeletePipelineRequest = Shapes::StructureShape.new(name: 'DeletePipelineRequest') DeletePipelineResponse = Shapes::StructureShape.new(name: 'DeletePipelineResponse') DeleteProjectInput = Shapes::StructureShape.new(name: 'DeleteProjectInput') @@ -681,6 +688,8 @@ module ClientApi DescribeNotebookInstanceLifecycleConfigInput = Shapes::StructureShape.new(name: 'DescribeNotebookInstanceLifecycleConfigInput') DescribeNotebookInstanceLifecycleConfigOutput = Shapes::StructureShape.new(name: 'DescribeNotebookInstanceLifecycleConfigOutput') DescribeNotebookInstanceOutput = Shapes::StructureShape.new(name: 'DescribeNotebookInstanceOutput') + DescribeOptimizationJobRequest = Shapes::StructureShape.new(name: 'DescribeOptimizationJobRequest') + DescribeOptimizationJobResponse = Shapes::StructureShape.new(name: 'DescribeOptimizationJobResponse') DescribePipelineDefinitionForExecutionRequest = Shapes::StructureShape.new(name: 'DescribePipelineDefinitionForExecutionRequest') DescribePipelineDefinitionForExecutionResponse = Shapes::StructureShape.new(name: 'DescribePipelineDefinitionForExecutionResponse') DescribePipelineExecutionRequest = Shapes::StructureShape.new(name: 'DescribePipelineExecutionRequest') @@ -1303,6 +1312,9 @@ module ClientApi ListNotebookInstanceLifecycleConfigsOutput = Shapes::StructureShape.new(name: 'ListNotebookInstanceLifecycleConfigsOutput') ListNotebookInstancesInput = Shapes::StructureShape.new(name: 'ListNotebookInstancesInput') ListNotebookInstancesOutput = Shapes::StructureShape.new(name: 'ListNotebookInstancesOutput') + ListOptimizationJobsRequest = Shapes::StructureShape.new(name: 'ListOptimizationJobsRequest') + ListOptimizationJobsResponse = Shapes::StructureShape.new(name: 'ListOptimizationJobsResponse') + ListOptimizationJobsSortBy = Shapes::StringShape.new(name: 'ListOptimizationJobsSortBy') ListPipelineExecutionStepsRequest = Shapes::StructureShape.new(name: 'ListPipelineExecutionStepsRequest') ListPipelineExecutionStepsResponse = Shapes::StructureShape.new(name: 'ListPipelineExecutionStepsResponse') ListPipelineExecutionsRequest = Shapes::StructureShape.new(name: 'ListPipelineExecutionsRequest') @@ -1425,6 +1437,7 @@ module ClientApi ModelCardVersionSummary = Shapes::StructureShape.new(name: 'ModelCardVersionSummary') ModelCardVersionSummaryList = Shapes::ListShape.new(name: 'ModelCardVersionSummaryList') ModelClientConfig = Shapes::StructureShape.new(name: 'ModelClientConfig') + ModelCompilationConfig = Shapes::StructureShape.new(name: 'ModelCompilationConfig') ModelCompressionType = Shapes::StringShape.new(name: 'ModelCompressionType') ModelConfiguration = Shapes::StructureShape.new(name: 'ModelConfiguration') ModelDashboardEndpoint = Shapes::StructureShape.new(name: 'ModelDashboardEndpoint') 
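
Taken together, the client methods added in this patch form a simple create/poll/stop lifecycle for optimization jobs. The following is a minimal sketch against this gem version, not part of the generated code; the region, job name, role ARN, S3 locations, and the quantization override value are placeholders chosen for illustration:

    require 'aws-sdk-sagemaker'

    client = Aws::SageMaker::Client.new(region: 'us-west-2')

    # Submit a quantization job (all identifiers below are placeholders).
    client.create_optimization_job(
      optimization_job_name: 'my-quantization-job',
      role_arn: 'arn:aws:iam::123456789012:role/SageMakerExecutionRole',
      model_source: {
        s3: {
          s3_uri: 's3://amzn-s3-demo-bucket/source-model/',
          model_access_config: { accept_eula: true },
        },
      },
      deployment_instance_type: 'ml.g5.2xlarge',
      optimization_configs: [
        # Set exactly one member of the OptimizationConfig union per entry.
        # The override value here is a placeholder, not a documented setting.
        { model_quantization_config: { override_environment: { 'OPTION_QUANTIZATION' => 'awq' } } },
      ],
      output_config: { s3_output_location: 's3://amzn-s3-demo-bucket/optimized-model/' },
      stopping_condition: { max_runtime_in_seconds: 36_000 },
    )

    # Poll DescribeOptimizationJob until the job leaves its in-flight states.
    loop do
      resp = client.describe_optimization_job(optimization_job_name: 'my-quantization-job')
      break unless %w[STARTING INPROGRESS STOPPING].include?(resp.optimization_job_status)
      sleep 30
    end

A job that is still in flight can be ended early with client.stop_optimization_job(optimization_job_name: 'my-quantization-job'), which this release also adds.
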
@@ -1489,6 +1502,7 @@ module ClientApi ModelQualityAppSpecification = Shapes::StructureShape.new(name: 'ModelQualityAppSpecification') ModelQualityBaselineConfig = Shapes::StructureShape.new(name: 'ModelQualityBaselineConfig') ModelQualityJobInput = Shapes::StructureShape.new(name: 'ModelQualityJobInput') + ModelQuantizationConfig = Shapes::StructureShape.new(name: 'ModelQuantizationConfig') ModelRegisterSettings = Shapes::StructureShape.new(name: 'ModelRegisterSettings') ModelSetupTime = Shapes::IntegerShape.new(name: 'ModelSetupTime') ModelSortKey = Shapes::StringShape.new(name: 'ModelSortKey') @@ -1615,6 +1629,28 @@ module ClientApi OnlineStoreSecurityConfig = Shapes::StructureShape.new(name: 'OnlineStoreSecurityConfig') OnlineStoreTotalSizeBytes = Shapes::IntegerShape.new(name: 'OnlineStoreTotalSizeBytes') Operator = Shapes::StringShape.new(name: 'Operator') + OptimizationConfig = Shapes::UnionShape.new(name: 'OptimizationConfig') + OptimizationConfigs = Shapes::ListShape.new(name: 'OptimizationConfigs') + OptimizationContainerImage = Shapes::StringShape.new(name: 'OptimizationContainerImage') + OptimizationJobArn = Shapes::StringShape.new(name: 'OptimizationJobArn') + OptimizationJobDeploymentInstanceType = Shapes::StringShape.new(name: 'OptimizationJobDeploymentInstanceType') + OptimizationJobEnvironmentVariables = Shapes::MapShape.new(name: 'OptimizationJobEnvironmentVariables') + OptimizationJobModelSource = Shapes::StructureShape.new(name: 'OptimizationJobModelSource') + OptimizationJobModelSourceS3 = Shapes::StructureShape.new(name: 'OptimizationJobModelSourceS3') + OptimizationJobOutputConfig = Shapes::StructureShape.new(name: 'OptimizationJobOutputConfig') + OptimizationJobStatus = Shapes::StringShape.new(name: 'OptimizationJobStatus') + OptimizationJobSummaries = Shapes::ListShape.new(name: 'OptimizationJobSummaries') + OptimizationJobSummary = Shapes::StructureShape.new(name: 'OptimizationJobSummary') + OptimizationModelAcceptEula = Shapes::BooleanShape.new(name: 'OptimizationModelAcceptEula') + OptimizationModelAccessConfig = Shapes::StructureShape.new(name: 'OptimizationModelAccessConfig') + OptimizationOutput = Shapes::StructureShape.new(name: 'OptimizationOutput') + OptimizationType = Shapes::StringShape.new(name: 'OptimizationType') + OptimizationTypes = Shapes::ListShape.new(name: 'OptimizationTypes') + OptimizationVpcConfig = Shapes::StructureShape.new(name: 'OptimizationVpcConfig') + OptimizationVpcSecurityGroupId = Shapes::StringShape.new(name: 'OptimizationVpcSecurityGroupId') + OptimizationVpcSecurityGroupIds = Shapes::ListShape.new(name: 'OptimizationVpcSecurityGroupIds') + OptimizationVpcSubnetId = Shapes::StringShape.new(name: 'OptimizationVpcSubnetId') + OptimizationVpcSubnets = Shapes::ListShape.new(name: 'OptimizationVpcSubnets') OptionalDouble = Shapes::FloatShape.new(name: 'OptionalDouble') OptionalInteger = Shapes::IntegerShape.new(name: 'OptionalInteger') OptionalVolumeSizeInGB = Shapes::IntegerShape.new(name: 'OptionalVolumeSizeInGB') @@ -1759,6 +1795,7 @@ module ClientApi PublicWorkforceTaskPrice = Shapes::StructureShape.new(name: 'PublicWorkforceTaskPrice') PutModelPackageGroupPolicyInput = Shapes::StructureShape.new(name: 'PutModelPackageGroupPolicyInput') PutModelPackageGroupPolicyOutput = Shapes::StructureShape.new(name: 'PutModelPackageGroupPolicyOutput') + QProfileArn = Shapes::StringShape.new(name: 'QProfileArn') QualityCheckStepMetadata = Shapes::StructureShape.new(name: 'QualityCheckStepMetadata') QueryFilters = Shapes::StructureShape.new(name: 
'QueryFilters') QueryLineageMaxDepth = Shapes::IntegerShape.new(name: 'QueryLineageMaxDepth') @@ -2001,6 +2038,7 @@ module ClientApi StopMlflowTrackingServerResponse = Shapes::StructureShape.new(name: 'StopMlflowTrackingServerResponse') StopMonitoringScheduleRequest = Shapes::StructureShape.new(name: 'StopMonitoringScheduleRequest') StopNotebookInstanceInput = Shapes::StructureShape.new(name: 'StopNotebookInstanceInput') + StopOptimizationJobRequest = Shapes::StructureShape.new(name: 'StopOptimizationJobRequest') StopPipelineExecutionRequest = Shapes::StructureShape.new(name: 'StopPipelineExecutionRequest') StopPipelineExecutionResponse = Shapes::StructureShape.new(name: 'StopPipelineExecutionResponse') StopProcessingJobRequest = Shapes::StructureShape.new(name: 'StopProcessingJobRequest') @@ -2361,6 +2399,12 @@ module ClientApi AdditionalInferenceSpecifications.member = Shapes::ShapeRef.new(shape: AdditionalInferenceSpecificationDefinition) + AdditionalModelDataSource.add_member(:channel_name, Shapes::ShapeRef.new(shape: AdditionalModelChannelName, required: true, location_name: "ChannelName")) + AdditionalModelDataSource.add_member(:s3_data_source, Shapes::ShapeRef.new(shape: S3ModelDataSource, required: true, location_name: "S3DataSource")) + AdditionalModelDataSource.struct_class = Types::AdditionalModelDataSource + + AdditionalModelDataSources.member = Shapes::ShapeRef.new(shape: AdditionalModelDataSource) + AdditionalS3DataSource.add_member(:s3_data_type, Shapes::ShapeRef.new(shape: AdditionalS3DataSourceDataType, required: true, location_name: "S3DataType")) AdditionalS3DataSource.add_member(:s3_uri, Shapes::ShapeRef.new(shape: S3Uri, required: true, location_name: "S3Uri")) AdditionalS3DataSource.add_member(:compression_type, Shapes::ShapeRef.new(shape: CompressionType, location_name: "CompressionType")) @@ -2421,6 +2465,10 @@ module ClientApi AlgorithmValidationSpecification.add_member(:validation_profiles, Shapes::ShapeRef.new(shape: AlgorithmValidationProfiles, required: true, location_name: "ValidationProfiles")) AlgorithmValidationSpecification.struct_class = Types::AlgorithmValidationSpecification + AmazonQSettings.add_member(:status, Shapes::ShapeRef.new(shape: FeatureStatus, location_name: "Status")) + AmazonQSettings.add_member(:q_profile_arn, Shapes::ShapeRef.new(shape: QProfileArn, location_name: "QProfileArn")) + AmazonQSettings.struct_class = Types::AmazonQSettings + AnnotationConsolidationConfig.add_member(:annotation_consolidation_lambda_arn, Shapes::ShapeRef.new(shape: LambdaFunctionArn, required: true, location_name: "AnnotationConsolidationLambdaArn")) AnnotationConsolidationConfig.struct_class = Types::AnnotationConsolidationConfig @@ -3040,6 +3088,7 @@ module ClientApi ContainerDefinition.add_member(:mode, Shapes::ShapeRef.new(shape: ContainerMode, location_name: "Mode")) ContainerDefinition.add_member(:model_data_url, Shapes::ShapeRef.new(shape: Url, location_name: "ModelDataUrl")) ContainerDefinition.add_member(:model_data_source, Shapes::ShapeRef.new(shape: ModelDataSource, location_name: "ModelDataSource")) + ContainerDefinition.add_member(:additional_model_data_sources, Shapes::ShapeRef.new(shape: AdditionalModelDataSources, location_name: "AdditionalModelDataSources")) ContainerDefinition.add_member(:environment, Shapes::ShapeRef.new(shape: EnvironmentMap, location_name: "Environment")) ContainerDefinition.add_member(:model_package_name, Shapes::ShapeRef.new(shape: VersionedArnOrName, location_name: "ModelPackageName")) 
ContainerDefinition.add_member(:inference_specification_name, Shapes::ShapeRef.new(shape: InferenceSpecificationName, location_name: "InferenceSpecificationName")) @@ -3624,6 +3673,21 @@ module ClientApi CreateNotebookInstanceOutput.add_member(:notebook_instance_arn, Shapes::ShapeRef.new(shape: NotebookInstanceArn, location_name: "NotebookInstanceArn")) CreateNotebookInstanceOutput.struct_class = Types::CreateNotebookInstanceOutput + CreateOptimizationJobRequest.add_member(:optimization_job_name, Shapes::ShapeRef.new(shape: EntityName, required: true, location_name: "OptimizationJobName")) + CreateOptimizationJobRequest.add_member(:role_arn, Shapes::ShapeRef.new(shape: RoleArn, required: true, location_name: "RoleArn")) + CreateOptimizationJobRequest.add_member(:model_source, Shapes::ShapeRef.new(shape: OptimizationJobModelSource, required: true, location_name: "ModelSource")) + CreateOptimizationJobRequest.add_member(:deployment_instance_type, Shapes::ShapeRef.new(shape: OptimizationJobDeploymentInstanceType, required: true, location_name: "DeploymentInstanceType")) + CreateOptimizationJobRequest.add_member(:optimization_environment, Shapes::ShapeRef.new(shape: OptimizationJobEnvironmentVariables, location_name: "OptimizationEnvironment")) + CreateOptimizationJobRequest.add_member(:optimization_configs, Shapes::ShapeRef.new(shape: OptimizationConfigs, required: true, location_name: "OptimizationConfigs")) + CreateOptimizationJobRequest.add_member(:output_config, Shapes::ShapeRef.new(shape: OptimizationJobOutputConfig, required: true, location_name: "OutputConfig")) + CreateOptimizationJobRequest.add_member(:stopping_condition, Shapes::ShapeRef.new(shape: StoppingCondition, required: true, location_name: "StoppingCondition")) + CreateOptimizationJobRequest.add_member(:tags, Shapes::ShapeRef.new(shape: TagList, location_name: "Tags")) + CreateOptimizationJobRequest.add_member(:vpc_config, Shapes::ShapeRef.new(shape: OptimizationVpcConfig, location_name: "VpcConfig")) + CreateOptimizationJobRequest.struct_class = Types::CreateOptimizationJobRequest + + CreateOptimizationJobResponse.add_member(:optimization_job_arn, Shapes::ShapeRef.new(shape: OptimizationJobArn, required: true, location_name: "OptimizationJobArn")) + CreateOptimizationJobResponse.struct_class = Types::CreateOptimizationJobResponse + CreatePipelineRequest.add_member(:pipeline_name, Shapes::ShapeRef.new(shape: PipelineName, required: true, location_name: "PipelineName")) CreatePipelineRequest.add_member(:pipeline_display_name, Shapes::ShapeRef.new(shape: PipelineName, location_name: "PipelineDisplayName")) CreatePipelineRequest.add_member(:pipeline_definition, Shapes::ShapeRef.new(shape: PipelineDefinition, location_name: "PipelineDefinition")) @@ -4132,6 +4196,9 @@ module ClientApi DeleteNotebookInstanceLifecycleConfigInput.add_member(:notebook_instance_lifecycle_config_name, Shapes::ShapeRef.new(shape: NotebookInstanceLifecycleConfigName, required: true, location_name: "NotebookInstanceLifecycleConfigName")) DeleteNotebookInstanceLifecycleConfigInput.struct_class = Types::DeleteNotebookInstanceLifecycleConfigInput + DeleteOptimizationJobRequest.add_member(:optimization_job_name, Shapes::ShapeRef.new(shape: EntityName, required: true, location_name: "OptimizationJobName")) + DeleteOptimizationJobRequest.struct_class = Types::DeleteOptimizationJobRequest + DeletePipelineRequest.add_member(:pipeline_name, Shapes::ShapeRef.new(shape: PipelineName, required: true, location_name: "PipelineName")) 
DeletePipelineRequest.add_member(:client_request_token, Shapes::ShapeRef.new(shape: IdempotencyToken, required: true, location_name: "ClientRequestToken", metadata: {"idempotencyToken"=>true})) DeletePipelineRequest.struct_class = Types::DeletePipelineRequest @@ -5056,6 +5123,28 @@ module ClientApi DescribeNotebookInstanceOutput.add_member(:instance_metadata_service_configuration, Shapes::ShapeRef.new(shape: InstanceMetadataServiceConfiguration, location_name: "InstanceMetadataServiceConfiguration")) DescribeNotebookInstanceOutput.struct_class = Types::DescribeNotebookInstanceOutput + DescribeOptimizationJobRequest.add_member(:optimization_job_name, Shapes::ShapeRef.new(shape: EntityName, required: true, location_name: "OptimizationJobName")) + DescribeOptimizationJobRequest.struct_class = Types::DescribeOptimizationJobRequest + + DescribeOptimizationJobResponse.add_member(:optimization_job_arn, Shapes::ShapeRef.new(shape: OptimizationJobArn, required: true, location_name: "OptimizationJobArn")) + DescribeOptimizationJobResponse.add_member(:optimization_job_status, Shapes::ShapeRef.new(shape: OptimizationJobStatus, required: true, location_name: "OptimizationJobStatus")) + DescribeOptimizationJobResponse.add_member(:optimization_start_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "OptimizationStartTime")) + DescribeOptimizationJobResponse.add_member(:optimization_end_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "OptimizationEndTime")) + DescribeOptimizationJobResponse.add_member(:creation_time, Shapes::ShapeRef.new(shape: CreationTime, required: true, location_name: "CreationTime")) + DescribeOptimizationJobResponse.add_member(:last_modified_time, Shapes::ShapeRef.new(shape: LastModifiedTime, required: true, location_name: "LastModifiedTime")) + DescribeOptimizationJobResponse.add_member(:failure_reason, Shapes::ShapeRef.new(shape: FailureReason, location_name: "FailureReason")) + DescribeOptimizationJobResponse.add_member(:optimization_job_name, Shapes::ShapeRef.new(shape: EntityName, required: true, location_name: "OptimizationJobName")) + DescribeOptimizationJobResponse.add_member(:model_source, Shapes::ShapeRef.new(shape: OptimizationJobModelSource, required: true, location_name: "ModelSource")) + DescribeOptimizationJobResponse.add_member(:optimization_environment, Shapes::ShapeRef.new(shape: OptimizationJobEnvironmentVariables, location_name: "OptimizationEnvironment")) + DescribeOptimizationJobResponse.add_member(:deployment_instance_type, Shapes::ShapeRef.new(shape: OptimizationJobDeploymentInstanceType, required: true, location_name: "DeploymentInstanceType")) + DescribeOptimizationJobResponse.add_member(:optimization_configs, Shapes::ShapeRef.new(shape: OptimizationConfigs, required: true, location_name: "OptimizationConfigs")) + DescribeOptimizationJobResponse.add_member(:output_config, Shapes::ShapeRef.new(shape: OptimizationJobOutputConfig, required: true, location_name: "OutputConfig")) + DescribeOptimizationJobResponse.add_member(:optimization_output, Shapes::ShapeRef.new(shape: OptimizationOutput, location_name: "OptimizationOutput")) + DescribeOptimizationJobResponse.add_member(:role_arn, Shapes::ShapeRef.new(shape: RoleArn, required: true, location_name: "RoleArn")) + DescribeOptimizationJobResponse.add_member(:stopping_condition, Shapes::ShapeRef.new(shape: StoppingCondition, required: true, location_name: "StoppingCondition")) + DescribeOptimizationJobResponse.add_member(:vpc_config, Shapes::ShapeRef.new(shape: OptimizationVpcConfig, 
location_name: "VpcConfig")) + DescribeOptimizationJobResponse.struct_class = Types::DescribeOptimizationJobResponse + DescribePipelineDefinitionForExecutionRequest.add_member(:pipeline_execution_arn, Shapes::ShapeRef.new(shape: PipelineExecutionArn, required: true, location_name: "PipelineExecutionArn")) DescribePipelineDefinitionForExecutionRequest.struct_class = Types::DescribePipelineDefinitionForExecutionRequest @@ -5417,12 +5506,14 @@ module ClientApi DomainSettings.add_member(:r_studio_server_pro_domain_settings, Shapes::ShapeRef.new(shape: RStudioServerProDomainSettings, location_name: "RStudioServerProDomainSettings")) DomainSettings.add_member(:execution_role_identity_config, Shapes::ShapeRef.new(shape: ExecutionRoleIdentityConfig, location_name: "ExecutionRoleIdentityConfig")) DomainSettings.add_member(:docker_settings, Shapes::ShapeRef.new(shape: DockerSettings, location_name: "DockerSettings")) + DomainSettings.add_member(:amazon_q_settings, Shapes::ShapeRef.new(shape: AmazonQSettings, location_name: "AmazonQSettings")) DomainSettings.struct_class = Types::DomainSettings DomainSettingsForUpdate.add_member(:r_studio_server_pro_domain_settings_for_update, Shapes::ShapeRef.new(shape: RStudioServerProDomainSettingsForUpdate, location_name: "RStudioServerProDomainSettingsForUpdate")) DomainSettingsForUpdate.add_member(:execution_role_identity_config, Shapes::ShapeRef.new(shape: ExecutionRoleIdentityConfig, location_name: "ExecutionRoleIdentityConfig")) DomainSettingsForUpdate.add_member(:security_group_ids, Shapes::ShapeRef.new(shape: DomainSecurityGroupIds, location_name: "SecurityGroupIds")) DomainSettingsForUpdate.add_member(:docker_settings, Shapes::ShapeRef.new(shape: DockerSettings, location_name: "DockerSettings")) + DomainSettingsForUpdate.add_member(:amazon_q_settings, Shapes::ShapeRef.new(shape: AmazonQSettings, location_name: "AmazonQSettings")) DomainSettingsForUpdate.struct_class = Types::DomainSettingsForUpdate DriftCheckBaselines.add_member(:bias, Shapes::ShapeRef.new(shape: DriftCheckBias, location_name: "Bias")) @@ -7283,6 +7374,23 @@ module ClientApi ListNotebookInstancesOutput.add_member(:notebook_instances, Shapes::ShapeRef.new(shape: NotebookInstanceSummaryList, location_name: "NotebookInstances")) ListNotebookInstancesOutput.struct_class = Types::ListNotebookInstancesOutput + ListOptimizationJobsRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "NextToken")) + ListOptimizationJobsRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResults, location_name: "MaxResults", metadata: {"box"=>true})) + ListOptimizationJobsRequest.add_member(:creation_time_after, Shapes::ShapeRef.new(shape: CreationTime, location_name: "CreationTimeAfter")) + ListOptimizationJobsRequest.add_member(:creation_time_before, Shapes::ShapeRef.new(shape: CreationTime, location_name: "CreationTimeBefore")) + ListOptimizationJobsRequest.add_member(:last_modified_time_after, Shapes::ShapeRef.new(shape: LastModifiedTime, location_name: "LastModifiedTimeAfter")) + ListOptimizationJobsRequest.add_member(:last_modified_time_before, Shapes::ShapeRef.new(shape: LastModifiedTime, location_name: "LastModifiedTimeBefore")) + ListOptimizationJobsRequest.add_member(:optimization_contains, Shapes::ShapeRef.new(shape: NameContains, location_name: "OptimizationContains")) + ListOptimizationJobsRequest.add_member(:name_contains, Shapes::ShapeRef.new(shape: NameContains, location_name: "NameContains")) + ListOptimizationJobsRequest.add_member(:status_equals, 
Shapes::ShapeRef.new(shape: OptimizationJobStatus, location_name: "StatusEquals")) + ListOptimizationJobsRequest.add_member(:sort_by, Shapes::ShapeRef.new(shape: ListOptimizationJobsSortBy, location_name: "SortBy")) + ListOptimizationJobsRequest.add_member(:sort_order, Shapes::ShapeRef.new(shape: SortOrder, location_name: "SortOrder")) + ListOptimizationJobsRequest.struct_class = Types::ListOptimizationJobsRequest + + ListOptimizationJobsResponse.add_member(:optimization_job_summaries, Shapes::ShapeRef.new(shape: OptimizationJobSummaries, required: true, location_name: "OptimizationJobSummaries")) + ListOptimizationJobsResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "NextToken")) + ListOptimizationJobsResponse.struct_class = Types::ListOptimizationJobsResponse + ListPipelineExecutionStepsRequest.add_member(:pipeline_execution_arn, Shapes::ShapeRef.new(shape: PipelineExecutionArn, location_name: "PipelineExecutionArn")) ListPipelineExecutionStepsRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "NextToken")) ListPipelineExecutionStepsRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResults, location_name: "MaxResults")) @@ -7673,6 +7781,10 @@ module ClientApi ModelClientConfig.add_member(:invocations_max_retries, Shapes::ShapeRef.new(shape: InvocationsMaxRetries, location_name: "InvocationsMaxRetries")) ModelClientConfig.struct_class = Types::ModelClientConfig + ModelCompilationConfig.add_member(:image, Shapes::ShapeRef.new(shape: OptimizationContainerImage, location_name: "Image")) + ModelCompilationConfig.add_member(:override_environment, Shapes::ShapeRef.new(shape: OptimizationJobEnvironmentVariables, location_name: "OverrideEnvironment")) + ModelCompilationConfig.struct_class = Types::ModelCompilationConfig + ModelConfiguration.add_member(:inference_specification_name, Shapes::ShapeRef.new(shape: InferenceSpecificationName, location_name: "InferenceSpecificationName")) ModelConfiguration.add_member(:environment_parameters, Shapes::ShapeRef.new(shape: EnvironmentParameters, location_name: "EnvironmentParameters")) ModelConfiguration.add_member(:compilation_job_name, Shapes::ShapeRef.new(shape: RecommendationJobCompilationJobName, location_name: "CompilationJobName")) @@ -7927,6 +8039,10 @@ module ClientApi ModelQualityJobInput.add_member(:ground_truth_s3_input, Shapes::ShapeRef.new(shape: MonitoringGroundTruthS3Input, required: true, location_name: "GroundTruthS3Input")) ModelQualityJobInput.struct_class = Types::ModelQualityJobInput + ModelQuantizationConfig.add_member(:image, Shapes::ShapeRef.new(shape: OptimizationContainerImage, location_name: "Image")) + ModelQuantizationConfig.add_member(:override_environment, Shapes::ShapeRef.new(shape: OptimizationJobEnvironmentVariables, location_name: "OverrideEnvironment")) + ModelQuantizationConfig.struct_class = Types::ModelQuantizationConfig + ModelRegisterSettings.add_member(:status, Shapes::ShapeRef.new(shape: FeatureStatus, location_name: "Status")) ModelRegisterSettings.add_member(:cross_account_model_register_role_arn, Shapes::ShapeRef.new(shape: RoleArn, location_name: "CrossAccountModelRegisterRoleArn")) ModelRegisterSettings.struct_class = Types::ModelRegisterSettings @@ -8230,6 +8346,59 @@ module ClientApi OnlineStoreSecurityConfig.add_member(:kms_key_id, Shapes::ShapeRef.new(shape: KmsKeyId, location_name: "KmsKeyId")) OnlineStoreSecurityConfig.struct_class = Types::OnlineStoreSecurityConfig + 
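+ # OptimizationConfig below is modeled as a tagged union: exactly one of
+ # its members may be set in a request. A minimal, hypothetical request
+ # fragment that exercises the quantization member (the image URI and
+ # environment key are illustrative placeholders, not documented values):
+ #
+ #   optimization_configs: [{ model_quantization_config: {
+ #     image: "111122223333.dkr.ecr.us-west-2.amazonaws.com/lmi:latest",
+ #     override_environment: { "OPTION_QUANTIZE" => "awq" } } }]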
OptimizationConfig.add_member(:model_quantization_config, Shapes::ShapeRef.new(shape: ModelQuantizationConfig, location_name: "ModelQuantizationConfig")) + OptimizationConfig.add_member(:model_compilation_config, Shapes::ShapeRef.new(shape: ModelCompilationConfig, location_name: "ModelCompilationConfig")) + OptimizationConfig.add_member(:unknown, Shapes::ShapeRef.new(shape: nil, location_name: 'unknown')) + OptimizationConfig.add_member_subclass(:model_quantization_config, Types::OptimizationConfig::ModelQuantizationConfig) + OptimizationConfig.add_member_subclass(:model_compilation_config, Types::OptimizationConfig::ModelCompilationConfig) + OptimizationConfig.add_member_subclass(:unknown, Types::OptimizationConfig::Unknown) + OptimizationConfig.struct_class = Types::OptimizationConfig + + OptimizationConfigs.member = Shapes::ShapeRef.new(shape: OptimizationConfig) + + OptimizationJobEnvironmentVariables.key = Shapes::ShapeRef.new(shape: NonEmptyString256) + OptimizationJobEnvironmentVariables.value = Shapes::ShapeRef.new(shape: String256) + + OptimizationJobModelSource.add_member(:s3, Shapes::ShapeRef.new(shape: OptimizationJobModelSourceS3, location_name: "S3")) + OptimizationJobModelSource.struct_class = Types::OptimizationJobModelSource + + OptimizationJobModelSourceS3.add_member(:s3_uri, Shapes::ShapeRef.new(shape: S3Uri, location_name: "S3Uri")) + OptimizationJobModelSourceS3.add_member(:model_access_config, Shapes::ShapeRef.new(shape: OptimizationModelAccessConfig, location_name: "ModelAccessConfig")) + OptimizationJobModelSourceS3.struct_class = Types::OptimizationJobModelSourceS3 + + OptimizationJobOutputConfig.add_member(:kms_key_id, Shapes::ShapeRef.new(shape: KmsKeyId, location_name: "KmsKeyId")) + OptimizationJobOutputConfig.add_member(:s3_output_location, Shapes::ShapeRef.new(shape: S3Uri, required: true, location_name: "S3OutputLocation")) + OptimizationJobOutputConfig.struct_class = Types::OptimizationJobOutputConfig + + OptimizationJobSummaries.member = Shapes::ShapeRef.new(shape: OptimizationJobSummary) + + OptimizationJobSummary.add_member(:optimization_job_name, Shapes::ShapeRef.new(shape: EntityName, required: true, location_name: "OptimizationJobName")) + OptimizationJobSummary.add_member(:optimization_job_arn, Shapes::ShapeRef.new(shape: OptimizationJobArn, required: true, location_name: "OptimizationJobArn")) + OptimizationJobSummary.add_member(:creation_time, Shapes::ShapeRef.new(shape: CreationTime, required: true, location_name: "CreationTime")) + OptimizationJobSummary.add_member(:optimization_job_status, Shapes::ShapeRef.new(shape: OptimizationJobStatus, required: true, location_name: "OptimizationJobStatus")) + OptimizationJobSummary.add_member(:optimization_start_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "OptimizationStartTime")) + OptimizationJobSummary.add_member(:optimization_end_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "OptimizationEndTime")) + OptimizationJobSummary.add_member(:last_modified_time, Shapes::ShapeRef.new(shape: LastModifiedTime, location_name: "LastModifiedTime")) + OptimizationJobSummary.add_member(:deployment_instance_type, Shapes::ShapeRef.new(shape: OptimizationJobDeploymentInstanceType, required: true, location_name: "DeploymentInstanceType")) + OptimizationJobSummary.add_member(:optimization_types, Shapes::ShapeRef.new(shape: OptimizationTypes, required: true, location_name: "OptimizationTypes")) + OptimizationJobSummary.struct_class = Types::OptimizationJobSummary + + 
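+ # OptimizationJobSummaries above is the list shape behind the paginated
+ # ListOptimizationJobs operation. A minimal pagination sketch (client
+ # construction and the filter value are illustrative):
+ #
+ #   client.list_optimization_jobs(status_equals: "COMPLETED").each do |page|
+ #     page.optimization_job_summaries.each { |s| puts s.optimization_job_name }
+ #   end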
OptimizationModelAccessConfig.add_member(:accept_eula, Shapes::ShapeRef.new(shape: OptimizationModelAcceptEula, required: true, location_name: "AcceptEula")) + OptimizationModelAccessConfig.struct_class = Types::OptimizationModelAccessConfig + + OptimizationOutput.add_member(:recommended_inference_image, Shapes::ShapeRef.new(shape: OptimizationContainerImage, location_name: "RecommendedInferenceImage")) + OptimizationOutput.struct_class = Types::OptimizationOutput + + OptimizationTypes.member = Shapes::ShapeRef.new(shape: OptimizationType) + + OptimizationVpcConfig.add_member(:security_group_ids, Shapes::ShapeRef.new(shape: OptimizationVpcSecurityGroupIds, required: true, location_name: "SecurityGroupIds")) + OptimizationVpcConfig.add_member(:subnets, Shapes::ShapeRef.new(shape: OptimizationVpcSubnets, required: true, location_name: "Subnets")) + OptimizationVpcConfig.struct_class = Types::OptimizationVpcConfig + + OptimizationVpcSecurityGroupIds.member = Shapes::ShapeRef.new(shape: OptimizationVpcSecurityGroupId) + + OptimizationVpcSubnets.member = Shapes::ShapeRef.new(shape: OptimizationVpcSubnetId) + OutputConfig.add_member(:s3_output_location, Shapes::ShapeRef.new(shape: S3Uri, required: true, location_name: "S3OutputLocation")) OutputConfig.add_member(:target_device, Shapes::ShapeRef.new(shape: TargetDevice, location_name: "TargetDevice")) OutputConfig.add_member(:target_platform, Shapes::ShapeRef.new(shape: TargetPlatform, location_name: "TargetPlatform")) @@ -9217,6 +9386,9 @@ module ClientApi StopNotebookInstanceInput.add_member(:notebook_instance_name, Shapes::ShapeRef.new(shape: NotebookInstanceName, required: true, location_name: "NotebookInstanceName")) StopNotebookInstanceInput.struct_class = Types::StopNotebookInstanceInput + StopOptimizationJobRequest.add_member(:optimization_job_name, Shapes::ShapeRef.new(shape: EntityName, required: true, location_name: "OptimizationJobName")) + StopOptimizationJobRequest.struct_class = Types::StopOptimizationJobRequest + StopPipelineExecutionRequest.add_member(:pipeline_execution_arn, Shapes::ShapeRef.new(shape: PipelineExecutionArn, required: true, location_name: "PipelineExecutionArn")) StopPipelineExecutionRequest.add_member(:client_request_token, Shapes::ShapeRef.new(shape: IdempotencyToken, required: true, location_name: "ClientRequestToken", metadata: {"idempotencyToken"=>true})) StopPipelineExecutionRequest.struct_class = Types::StopPipelineExecutionRequest @@ -10659,6 +10831,16 @@ module ClientApi o.errors << Shapes::ShapeRef.new(shape: ResourceLimitExceeded) end) + api.add_operation(:create_optimization_job, Seahorse::Model::Operation.new.tap do |o| + o.name = "CreateOptimizationJob" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: CreateOptimizationJobRequest) + o.output = Shapes::ShapeRef.new(shape: CreateOptimizationJobResponse) + o.errors << Shapes::ShapeRef.new(shape: ResourceInUse) + o.errors << Shapes::ShapeRef.new(shape: ResourceLimitExceeded) + end) + api.add_operation(:create_pipeline, Seahorse::Model::Operation.new.tap do |o| o.name = "CreatePipeline" o.http_method = "POST" @@ -11174,6 +11356,15 @@ module ClientApi o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) + api.add_operation(:delete_optimization_job, Seahorse::Model::Operation.new.tap do |o| + o.name = "DeleteOptimizationJob" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: DeleteOptimizationJobRequest) + o.output = 
Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + o.errors << Shapes::ShapeRef.new(shape: ResourceNotFound) + end) + api.add_operation(:delete_pipeline, Seahorse::Model::Operation.new.tap do |o| o.name = "DeletePipeline" o.http_method = "POST" @@ -11687,6 +11878,15 @@ module ClientApi o.output = Shapes::ShapeRef.new(shape: DescribeNotebookInstanceLifecycleConfigOutput) end) + api.add_operation(:describe_optimization_job, Seahorse::Model::Operation.new.tap do |o| + o.name = "DescribeOptimizationJob" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: DescribeOptimizationJobRequest) + o.output = Shapes::ShapeRef.new(shape: DescribeOptimizationJobResponse) + o.errors << Shapes::ShapeRef.new(shape: ResourceNotFound) + end) + api.add_operation(:describe_pipeline, Seahorse::Model::Operation.new.tap do |o| o.name = "DescribePipeline" o.http_method = "POST" @@ -12686,6 +12886,20 @@ module ClientApi ) end) + api.add_operation(:list_optimization_jobs, Seahorse::Model::Operation.new.tap do |o| + o.name = "ListOptimizationJobs" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: ListOptimizationJobsRequest) + o.output = Shapes::ShapeRef.new(shape: ListOptimizationJobsResponse) + o[:pager] = Aws::Pager.new( + limit_key: "max_results", + tokens: { + "next_token" => "next_token" + } + ) + end) + api.add_operation(:list_pipeline_execution_steps, Seahorse::Model::Operation.new.tap do |o| o.name = "ListPipelineExecutionSteps" o.http_method = "POST" @@ -13217,6 +13431,15 @@ module ClientApi o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) + api.add_operation(:stop_optimization_job, Seahorse::Model::Operation.new.tap do |o| + o.name = "StopOptimizationJob" + o.http_method = "POST" + o.http_request_uri = "/" + o.input = Shapes::ShapeRef.new(shape: StopOptimizationJobRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + o.errors << Shapes::ShapeRef.new(shape: ResourceNotFound) + end) + api.add_operation(:stop_pipeline_execution, Seahorse::Model::Operation.new.tap do |o| o.name = "StopPipelineExecution" o.http_method = "POST" diff --git a/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker/endpoints.rb b/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker/endpoints.rb index 2d68724b7a6..afe7c14397b 100644 --- a/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker/endpoints.rb +++ b/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker/endpoints.rb @@ -684,6 +684,20 @@ def self.build(context) end end + class CreateOptimizationJob + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::SageMaker::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + class CreatePipeline def self.build(context) unless context.config.regional_endpoint @@ -1468,6 +1482,20 @@ def self.build(context) end end + class DeleteOptimizationJob + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::SageMaker::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + class DeletePipeline def self.build(context) unless 
context.config.regional_endpoint @@ -2280,6 +2308,20 @@ def self.build(context) end end + class DescribeOptimizationJob + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::SageMaker::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + class DescribePipeline def self.build(context) unless context.config.regional_endpoint @@ -3414,6 +3456,20 @@ def self.build(context) end end + class ListOptimizationJobs + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::SageMaker::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + class ListPipelineExecutionSteps def self.build(context) unless context.config.regional_endpoint @@ -4044,6 +4100,20 @@ def self.build(context) end end + class StopOptimizationJob + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::SageMaker::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + class StopPipelineExecution def self.build(context) unless context.config.regional_endpoint diff --git a/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker/plugins/endpoints.rb b/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker/plugins/endpoints.rb index dd387014dec..75bb640b884 100644 --- a/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker/plugins/endpoints.rb +++ b/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker/plugins/endpoints.rb @@ -154,6 +154,8 @@ def parameters_for_operation(context) Aws::SageMaker::Endpoints::CreateNotebookInstance.build(context) when :create_notebook_instance_lifecycle_config Aws::SageMaker::Endpoints::CreateNotebookInstanceLifecycleConfig.build(context) + when :create_optimization_job + Aws::SageMaker::Endpoints::CreateOptimizationJob.build(context) when :create_pipeline Aws::SageMaker::Endpoints::CreatePipeline.build(context) when :create_presigned_domain_url @@ -266,6 +268,8 @@ def parameters_for_operation(context) Aws::SageMaker::Endpoints::DeleteNotebookInstance.build(context) when :delete_notebook_instance_lifecycle_config Aws::SageMaker::Endpoints::DeleteNotebookInstanceLifecycleConfig.build(context) + when :delete_optimization_job + Aws::SageMaker::Endpoints::DeleteOptimizationJob.build(context) when :delete_pipeline Aws::SageMaker::Endpoints::DeletePipeline.build(context) when :delete_project @@ -382,6 +386,8 @@ def parameters_for_operation(context) Aws::SageMaker::Endpoints::DescribeNotebookInstance.build(context) when :describe_notebook_instance_lifecycle_config Aws::SageMaker::Endpoints::DescribeNotebookInstanceLifecycleConfig.build(context) + when :describe_optimization_job + Aws::SageMaker::Endpoints::DescribeOptimizationJob.build(context) when :describe_pipeline Aws::SageMaker::Endpoints::DescribePipeline.build(context) when :describe_pipeline_definition_for_execution @@ -544,6 +550,8 @@ def parameters_for_operation(context) Aws::SageMaker::Endpoints::ListNotebookInstanceLifecycleConfigs.build(context) when :list_notebook_instances Aws::SageMaker::Endpoints::ListNotebookInstances.build(context) + when 
:list_optimization_jobs + Aws::SageMaker::Endpoints::ListOptimizationJobs.build(context) when :list_pipeline_execution_steps Aws::SageMaker::Endpoints::ListPipelineExecutionSteps.build(context) when :list_pipeline_executions @@ -634,6 +642,8 @@ def parameters_for_operation(context) Aws::SageMaker::Endpoints::StopMonitoringSchedule.build(context) when :stop_notebook_instance Aws::SageMaker::Endpoints::StopNotebookInstance.build(context) + when :stop_optimization_job + Aws::SageMaker::Endpoints::StopOptimizationJob.build(context) when :stop_pipeline_execution Aws::SageMaker::Endpoints::StopPipelineExecution.build(context) when :stop_processing_job diff --git a/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker/types.rb b/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker/types.rb index e8c729a5b58..f5cbc4e47df 100644 --- a/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker/types.rb +++ b/gems/aws-sdk-sagemaker/lib/aws-sdk-sagemaker/types.rb @@ -223,6 +223,27 @@ class AdditionalInferenceSpecificationDefinition < Struct.new( include Aws::Structure end + # Data sources that are available to your model in addition to the one + # that you specify for `ModelDataSource` when you use the `CreateModel` + # action. + # + # @!attribute [rw] channel_name + # A custom name for this `AdditionalModelDataSource` object. + # @return [String] + # + # @!attribute [rw] s3_data_source + # Specifies the S3 location of ML model data to deploy. + # @return [Types::S3ModelDataSource] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/AdditionalModelDataSource AWS API Documentation + # + class AdditionalModelDataSource < Struct.new( + :channel_name, + :s3_data_source) + SENSITIVE = [] + include Aws::Structure + end + # A data source used for training or inference that is in addition to # the input dataset or model data. # @@ -593,6 +614,26 @@ class AlgorithmValidationSpecification < Struct.new( include Aws::Structure end + # A collection of settings that configure the Amazon Q experience within + # the domain. + # + # @!attribute [rw] status + # Whether Amazon Q has been enabled within the domain. + # @return [String] + # + # @!attribute [rw] q_profile_arn + # The ARN of the Amazon Q profile used within the domain. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/AmazonQSettings AWS API Documentation + # + class AmazonQSettings < Struct.new( + :status, + :q_profile_arn) + SENSITIVE = [] + include Aws::Structure + end + # Configures how labels are consolidated across human workers and # processes output data. # @@ -4812,6 +4853,12 @@ class ContainerConfig < Struct.new( # # @return [Types::ModelDataSource] # + # @!attribute [rw] additional_model_data_sources + # Data sources that are available to your model in addition to the one + # that you specify for `ModelDataSource` when you use the + # `CreateModel` action. + # @return [Array] + # # @!attribute [rw] environment # The environment variables to set in the Docker container. # @@ -4844,6 +4891,7 @@ class ContainerDefinition < Struct.new( :mode, :model_data_url, :model_data_source, + :additional_model_data_sources, :environment, :model_package_name, :inference_specification_name, @@ -8674,6 +8722,126 @@ class CreateNotebookInstanceOutput < Struct.new( include Aws::Structure end + # @!attribute [rw] optimization_job_name + # A custom name for the new optimization job. 
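+ # For example, `llama-awq-job-01` (a hypothetical name). Creating a job with a name that is already in use fails with a `ResourceInUse` error.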
+ # @return [String] + # + # @!attribute [rw] role_arn + # The Amazon Resource Name (ARN) of an IAM role that enables Amazon + # SageMaker to perform tasks on your behalf. + # + # During model optimization, Amazon SageMaker needs your permission + # to: + # + # * Read input data from an S3 bucket + # + # * Write model artifacts to an S3 bucket + # + # * Write logs to Amazon CloudWatch Logs + # + # * Publish metrics to Amazon CloudWatch + # + # You grant permissions for all of these tasks to an IAM role. To pass + # this role to Amazon SageMaker, the caller of this API must have the + # `iam:PassRole` permission. For more information, see [Amazon + # SageMaker Roles][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html + # @return [String] + # + # @!attribute [rw] model_source + # The location of the source model to optimize with an optimization + # job. + # @return [Types::OptimizationJobModelSource] + # + # @!attribute [rw] deployment_instance_type + # The type of instance that hosts the optimized model that you create + # with the optimization job. + # @return [String] + # + # @!attribute [rw] optimization_environment + # The environment variables to set in the model container. + # @return [Hash<String,String>] + # + # @!attribute [rw] optimization_configs + # Settings for each of the optimization techniques that the job + # applies. + # @return [Array<Types::OptimizationConfig>] + # + # @!attribute [rw] output_config + # Details for where to store the optimized model that you create with + # the optimization job. + # @return [Types::OptimizationJobOutputConfig] + # + # @!attribute [rw] stopping_condition + # Specifies a limit to how long a job can run. When the job reaches + # the time limit, SageMaker ends the job. Use this API to cap costs. + # + # To stop a training job, SageMaker sends the algorithm the `SIGTERM` + # signal, which delays job termination for 120 seconds. Algorithms can + # use this 120-second window to save the model artifacts, so the + # results of training are not lost. + # + # The training algorithms provided by SageMaker automatically save the + # intermediate results of a model training job when possible. This + # attempt to save artifacts is only a best effort, as the model might + # not be in a state from which it can be saved. For example, if + # training has just started, the model might not be ready to save. + # When saved, this intermediate data is a valid model artifact. You + # can use it to create a model with `CreateModel`. + # + # The Neural Topic Model (NTM) currently does not support saving + # intermediate model artifacts. When training NTMs, make sure that the + # maximum runtime is sufficient for the training job to complete. + # + # + # @return [Types::StoppingCondition] + # + # @!attribute [rw] tags + # A list of key-value pairs associated with the optimization job. For + # more information, see [Tagging Amazon Web Services resources][1] in + # the *Amazon Web Services General Reference Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html + # @return [Array<Types::Tag>] + # + # @!attribute [rw] vpc_config + # A VPC in Amazon VPC that your optimized model has access to.
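+ #   A minimal sketch, with hypothetical IDs (both members are required):
+ #
+ #       vpc_config: {
+ #         security_group_ids: ["sg-0123456789abcdef0"],
+ #         subnets: ["subnet-0123456789abcdef0"]
+ #       }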
+ # @return [Types::OptimizationVpcConfig] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateOptimizationJobRequest AWS API Documentation + # + class CreateOptimizationJobRequest < Struct.new( + :optimization_job_name, + :role_arn, + :model_source, + :deployment_instance_type, + :optimization_environment, + :optimization_configs, + :output_config, + :stopping_condition, + :tags, + :vpc_config) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] optimization_job_arn + # The Amazon Resource Name (ARN) of the optimization job. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateOptimizationJobResponse AWS API Documentation + # + class CreateOptimizationJobResponse < Struct.new( + :optimization_job_arn) + SENSITIVE = [] + include Aws::Structure + end + # @!attribute [rw] pipeline_name # The name of the pipeline. # @return [String] @@ -11386,6 +11554,18 @@ class DeleteNotebookInstanceLifecycleConfigInput < Struct.new( include Aws::Structure end + # @!attribute [rw] optimization_job_name + # The name that you assigned to the optimization job. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteOptimizationJobRequest AWS API Documentation + # + class DeleteOptimizationJobRequest < Struct.new( + :optimization_job_name) + SENSITIVE = [] + include Aws::Structure + end + # @!attribute [rw] pipeline_name # The name of the pipeline to delete. # @return [String] @@ -16373,6 +16553,135 @@ class DescribeNotebookInstanceOutput < Struct.new( include Aws::Structure end + # @!attribute [rw] optimization_job_name + # The name that you assigned to the optimization job. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeOptimizationJobRequest AWS API Documentation + # + class DescribeOptimizationJobRequest < Struct.new( + :optimization_job_name) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] optimization_job_arn + # The Amazon Resource Name (ARN) of the optimization job. + # @return [String] + # + # @!attribute [rw] optimization_job_status + # The current status of the optimization job. + # @return [String] + # + # @!attribute [rw] optimization_start_time + # The time when the optimization job started. + # @return [Time] + # + # @!attribute [rw] optimization_end_time + # The time when the optimization job finished processing. + # @return [Time] + # + # @!attribute [rw] creation_time + # The time when you created the optimization job. + # @return [Time] + # + # @!attribute [rw] last_modified_time + # The time when the optimization job was last updated. + # @return [Time] + # + # @!attribute [rw] failure_reason + # If the optimization job status is `FAILED`, the reason for the + # failure. + # @return [String] + # + # @!attribute [rw] optimization_job_name + # The name that you assigned to the optimization job. + # @return [String] + # + # @!attribute [rw] model_source + # The location of the source model to optimize with an optimization + # job. + # @return [Types::OptimizationJobModelSource] + # + # @!attribute [rw] optimization_environment + # The environment variables to set in the model container. + # @return [Hash] + # + # @!attribute [rw] deployment_instance_type + # The type of instance that hosts the optimized model that you create + # with the optimization job. 
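+ # (For example, `ml.g5.2xlarge`. The `OptimizationJobDeploymentInstanceType` enum in the API model lists every supported type.)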
+ # @return [String] + # + # @!attribute [rw] optimization_configs + # Settings for each of the optimization techniques that the job + # applies. + # @return [Array<Types::OptimizationConfig>] + # + # @!attribute [rw] output_config + # Details for where to store the optimized model that you create with + # the optimization job. + # @return [Types::OptimizationJobOutputConfig] + # + # @!attribute [rw] optimization_output + # Output values produced by an optimization job. + # @return [Types::OptimizationOutput] + # + # @!attribute [rw] role_arn + # The ARN of the IAM role that you assigned to the optimization job. + # @return [String] + # + # @!attribute [rw] stopping_condition + # Specifies a limit to how long a job can run. When the job reaches + # the time limit, SageMaker ends the job. Use this API to cap costs. + # + # To stop a training job, SageMaker sends the algorithm the `SIGTERM` + # signal, which delays job termination for 120 seconds. Algorithms can + # use this 120-second window to save the model artifacts, so the + # results of training are not lost. + # + # The training algorithms provided by SageMaker automatically save the + # intermediate results of a model training job when possible. This + # attempt to save artifacts is only a best effort, as the model might + # not be in a state from which it can be saved. For example, if + # training has just started, the model might not be ready to save. + # When saved, this intermediate data is a valid model artifact. You + # can use it to create a model with `CreateModel`. + # + # The Neural Topic Model (NTM) currently does not support saving + # intermediate model artifacts. When training NTMs, make sure that the + # maximum runtime is sufficient for the training job to complete. + # + # + # @return [Types::StoppingCondition] + # + # @!attribute [rw] vpc_config + # A VPC in Amazon VPC that your optimized model has access to. + # @return [Types::OptimizationVpcConfig] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeOptimizationJobResponse AWS API Documentation + # + class DescribeOptimizationJobResponse < Struct.new( + :optimization_job_arn, + :optimization_job_status, + :optimization_start_time, + :optimization_end_time, + :creation_time, + :last_modified_time, + :failure_reason, + :optimization_job_name, + :model_source, + :optimization_environment, + :deployment_instance_type, + :optimization_configs, + :output_config, + :optimization_output, + :role_arn, + :stopping_condition, + :vpc_config) + SENSITIVE = [] + include Aws::Structure + end + # @!attribute [rw] pipeline_execution_arn # The Amazon Resource Name (ARN) of the pipeline execution. # @return [String] @@ -18292,13 +18601,20 @@ class DomainDetails < Struct.new( # interaction. # @return [Types::DockerSettings] # + # @!attribute [rw] amazon_q_settings + # A collection of settings that configure the Amazon Q experience + # within the domain. The `AuthMode` that you use to create the domain + # must be `SSO`. + # @return [Types::AmazonQSettings] + # # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DomainSettings AWS API Documentation # class DomainSettings < Struct.new( :security_group_ids, :r_studio_server_pro_domain_settings, :execution_role_identity_config, - :docker_settings) + :docker_settings, + :amazon_q_settings) SENSITIVE = [] include Aws::Structure end @@ -18333,13 +18649,19 @@ class DomainSettings < Struct.new( # interaction.
# @return [Types::DockerSettings] # + # @!attribute [rw] amazon_q_settings + # A collection of settings that configure the Amazon Q experience + # within the domain. + # @return [Types::AmazonQSettings] + # # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DomainSettingsForUpdate AWS API Documentation # class DomainSettingsForUpdate < Struct.new( :r_studio_server_pro_domain_settings_for_update, :execution_role_identity_config, :security_group_ids, - :docker_settings) + :docker_settings, + :amazon_q_settings) SENSITIVE = [] include Aws::Structure end @@ -29506,6 +29828,99 @@ class ListNotebookInstancesOutput < Struct.new( include Aws::Structure end + # @!attribute [rw] next_token + # A token that you use to get the next set of results following a + # truncated response. If the response to the previous request was + # truncated, that response provides the value for this token. + # @return [String] + # + # @!attribute [rw] max_results + # The maximum number of optimization jobs to return in the response. + # The default is 50. + # @return [Integer] + # + # @!attribute [rw] creation_time_after + # Filters the results to only those optimization jobs that were + # created after the specified time. + # @return [Time] + # + # @!attribute [rw] creation_time_before + # Filters the results to only those optimization jobs that were + # created before the specified time. + # @return [Time] + # + # @!attribute [rw] last_modified_time_after + # Filters the results to only those optimization jobs that were + # updated after the specified time. + # @return [Time] + # + # @!attribute [rw] last_modified_time_before + # Filters the results to only those optimization jobs that were + # updated before the specified time. + # @return [Time] + # + # @!attribute [rw] optimization_contains + # Filters the results to only those optimization jobs that apply the + # specified optimization techniques. You can specify either + # `Quantization` or `Compilation`. + # @return [String] + # + # @!attribute [rw] name_contains + # Filters the results to only those optimization jobs with a name that + # contains the specified string. + # @return [String] + # + # @!attribute [rw] status_equals + # Filters the results to only those optimization jobs with the + # specified status. + # @return [String] + # + # @!attribute [rw] sort_by + # The field by which to sort the optimization jobs in the response. + # The default is `CreationTime`. + # @return [String] + # + # @!attribute [rw] sort_order + # The sort order for results. The default is `Ascending`. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListOptimizationJobsRequest AWS API Documentation + # + class ListOptimizationJobsRequest < Struct.new( + :next_token, + :max_results, + :creation_time_after, + :creation_time_before, + :last_modified_time_after, + :last_modified_time_before, + :optimization_contains, + :name_contains, + :status_equals, + :sort_by, + :sort_order) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] optimization_job_summaries + # A list of optimization jobs and their properties that match any of + # the filters you specified in the request. + # @return [Array<Types::OptimizationJobSummary>] + # + # @!attribute [rw] next_token + # The token to use in a subsequent request to get the next set of + # results following a truncated response.
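+ # Pass this value as `NextToken` in a follow-up request; the SDK's registered pager resumes from it automatically when you enumerate the response pages.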
+ # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListOptimizationJobsResponse AWS API Documentation + # + class ListOptimizationJobsResponse < Struct.new( + :optimization_job_summaries, + :next_token) + SENSITIVE = [] + include Aws::Structure + end + # @!attribute [rw] pipeline_execution_arn # The Amazon Resource Name (ARN) of the pipeline execution. # @return [String] @@ -31544,6 +31959,28 @@ class ModelClientConfig < Struct.new( include Aws::Structure end + # Settings for the model compilation technique that's applied by a + # model optimization job. + # + # @!attribute [rw] image + # The URI of a Large Model Inference (LMI) Deep Learning Container + # (DLC) in Amazon ECR. SageMaker uses this image to + # run the optimization. + # @return [String] + # + # @!attribute [rw] override_environment + # Environment variables that override the default ones in the model + # container. + # @return [Hash<String,String>] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ModelCompilationConfig AWS API Documentation + # + class ModelCompilationConfig < Struct.new( + :image, + :override_environment) + SENSITIVE = [] + include Aws::Structure + end + # Defines the model configuration. Includes the specification name and # environment parameters. # @@ -32887,6 +33324,28 @@ class ModelQualityJobInput < Struct.new( include Aws::Structure end + # Settings for the model quantization technique that's applied by a + # model optimization job. + # + # @!attribute [rw] image + # The URI of a Large Model Inference (LMI) Deep Learning Container + # (DLC) in Amazon ECR. SageMaker uses this image to + # run the optimization. + # @return [String] + # + # @!attribute [rw] override_environment + # Environment variables that override the default ones in the model + # container. + # @return [Hash<String,String>] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ModelQuantizationConfig AWS API Documentation + # + class ModelQuantizationConfig < Struct.new( + :image, + :override_environment) + SENSITIVE = [] + include Aws::Structure + end + # The model registry settings for the SageMaker Canvas application. # # @!attribute [rw] status @@ -34473,6 +34932,221 @@ class OnlineStoreSecurityConfig < Struct.new( include Aws::Structure end + # Settings for an optimization technique that you apply with a model + # optimization job. + # + # @note OptimizationConfig is a union - when making an API call you must set exactly one of the members. + # + # @note OptimizationConfig is a union - when returned from an API call exactly one value will be set and the returned type will be a subclass of OptimizationConfig corresponding to the set member. + # + # @!attribute [rw] model_quantization_config + # Settings for the model quantization technique that's applied by a + # model optimization job. + # @return [Types::ModelQuantizationConfig] + # + # @!attribute [rw] model_compilation_config + # Settings for the model compilation technique that's applied by a + # model optimization job. + # @return [Types::ModelCompilationConfig] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/OptimizationConfig AWS API Documentation + # + class OptimizationConfig < Struct.new( + :model_quantization_config, + :model_compilation_config, + :unknown) + SENSITIVE = [] + include Aws::Structure + include Aws::Structure::Union + + class ModelQuantizationConfig < OptimizationConfig; end + class ModelCompilationConfig < OptimizationConfig; end + class Unknown < OptimizationConfig; end + end + + # The location of the source model to optimize with an optimization job.
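+ # Amazon S3 is the only source type that this structure currently models.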
+ # + # @!attribute [rw] s3 + # The Amazon S3 location of a source model to optimize with an + # optimization job. + # @return [Types::OptimizationJobModelSourceS3] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/OptimizationJobModelSource AWS API Documentation + # + class OptimizationJobModelSource < Struct.new( + :s3) + SENSITIVE = [] + include Aws::Structure + end + + # The Amazon S3 location of a source model to optimize with an + # optimization job. + # + # @!attribute [rw] s3_uri + # An Amazon S3 URI that locates a source model to optimize with an + # optimization job. + # @return [String] + # + # @!attribute [rw] model_access_config + # The access configuration settings for the source ML model for an + # optimization job, where you can accept the model end-user license + # agreement (EULA). + # @return [Types::OptimizationModelAccessConfig] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/OptimizationJobModelSourceS3 AWS API Documentation + # + class OptimizationJobModelSourceS3 < Struct.new( + :s3_uri, + :model_access_config) + SENSITIVE = [] + include Aws::Structure + end + + # Details for where to store the optimized model that you create with + # the optimization job. + # + # @!attribute [rw] kms_key_id + # The Amazon Resource Name (ARN) of a key in Amazon Web Services KMS. + # SageMaker uses the key to encrypt the artifacts of the optimized + # model when SageMaker uploads the model to Amazon S3. + # @return [String] + # + # @!attribute [rw] s3_output_location + # The Amazon S3 URI for where to store the optimized model that you + # create with an optimization job. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/OptimizationJobOutputConfig AWS API Documentation + # + class OptimizationJobOutputConfig < Struct.new( + :kms_key_id, + :s3_output_location) + SENSITIVE = [] + include Aws::Structure + end + + # Summarizes an optimization job by providing some of its key + # properties. + # + # @!attribute [rw] optimization_job_name + # The name that you assigned to the optimization job. + # @return [String] + # + # @!attribute [rw] optimization_job_arn + # The Amazon Resource Name (ARN) of the optimization job. + # @return [String] + # + # @!attribute [rw] creation_time + # The time when you created the optimization job. + # @return [Time] + # + # @!attribute [rw] optimization_job_status + # The current status of the optimization job. + # @return [String] + # + # @!attribute [rw] optimization_start_time + # The time when the optimization job started. + # @return [Time] + # + # @!attribute [rw] optimization_end_time + # The time when the optimization job finished processing. + # @return [Time] + # + # @!attribute [rw] last_modified_time + # The time when the optimization job was last updated. + # @return [Time] + # + # @!attribute [rw] deployment_instance_type + # The type of instance that hosts the optimized model that you create + # with the optimization job. + # @return [String] + # + # @!attribute [rw] optimization_types + # The optimization techniques that are applied by the optimization + # job.
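+ # Possible values include `Quantization` and `Compilation`.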
+ # @return [Array<String>] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/OptimizationJobSummary AWS API Documentation + # + class OptimizationJobSummary < Struct.new( + :optimization_job_name, + :optimization_job_arn, + :creation_time, + :optimization_job_status, + :optimization_start_time, + :optimization_end_time, + :last_modified_time, + :deployment_instance_type, + :optimization_types) + SENSITIVE = [] + include Aws::Structure + end + + # The access configuration settings for the source ML model for an + # optimization job, where you can accept the model end-user license + # agreement (EULA). + # + # @!attribute [rw] accept_eula + # Specifies agreement to the model end-user license agreement (EULA). + # The `AcceptEula` value must be explicitly defined as `True` in order + # to accept the EULA that this model requires. You are responsible for + # reviewing and complying with any applicable license terms and making + # sure they are acceptable for your use case before downloading or + # using a model. + # @return [Boolean] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/OptimizationModelAccessConfig AWS API Documentation + # + class OptimizationModelAccessConfig < Struct.new( + :accept_eula) + SENSITIVE = [] + include Aws::Structure + end + + # Output values produced by an optimization job. + # + # @!attribute [rw] recommended_inference_image + # The image that SageMaker recommends that you use to host the + # optimized model that you created with an optimization job. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/OptimizationOutput AWS API Documentation + # + class OptimizationOutput < Struct.new( + :recommended_inference_image) + SENSITIVE = [] + include Aws::Structure + end + + # A VPC in Amazon VPC that's accessible to an optimized model that you + # create with an optimization job. You can control access to and from your + # resources by configuring a VPC. For more information, see [Give + # SageMaker Access to Resources in your Amazon VPC][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/sagemaker/latest/dg/infrastructure-give-access.html + # + # @!attribute [rw] security_group_ids + # The VPC security group IDs, in the form `sg-xxxxxxxx`. Specify the + # security groups for the VPC that is specified in the `Subnets` + # field. + # @return [Array<String>] + # + # @!attribute [rw] subnets + # The IDs of the subnets in the VPC to which you want to connect your + # optimized model. + # @return [Array<String>] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/OptimizationVpcConfig AWS API Documentation + # + class OptimizationVpcConfig < Struct.new( + :security_group_ids, + :subnets) + SENSITIVE = [] + include Aws::Structure + end + + # Contains information about the output location for the compiled model # and the target device that the model runs on. `TargetDevice` and # `TargetPlatform` are mutually exclusive, so you need to choose one @@ -40597,6 +41271,18 @@ class StopNotebookInstanceInput < Struct.new( include Aws::Structure end + # @!attribute [rw] optimization_job_name + # The name that you assigned to the optimization job.
+ # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StopOptimizationJobRequest AWS API Documentation + # + class StopOptimizationJobRequest < Struct.new( + :optimization_job_name) + SENSITIVE = [] + include Aws::Structure + end + # @!attribute [rw] pipeline_execution_arn # The Amazon Resource Name (ARN) of the pipeline execution. # @return [String] @@ -40667,11 +41353,8 @@ class StopTransformJobRequest < Struct.new( include Aws::Structure end - # Specifies a limit to how long a model training job or model - # compilation job can run. It also specifies how long a managed spot - # training job has to complete. When the job reaches the time limit, - # SageMaker ends the training or compilation job. Use this API to cap - # model training costs. + # Specifies a limit to how long a job can run. When the job reaches the + # time limit, SageMaker ends the job. Use this API to cap costs. # # To stop a training job, SageMaker sends the algorithm the `SIGTERM` # signal, which delays job termination for 120 seconds. Algorithms can diff --git a/gems/aws-sdk-sagemaker/sig/client.rbs b/gems/aws-sdk-sagemaker/sig/client.rbs index 98e54c42b51..5898a4f1c7a 100644 --- a/gems/aws-sdk-sagemaker/sig/client.rbs +++ b/gems/aws-sdk-sagemaker/sig/client.rbs @@ -1125,6 +1125,10 @@ module Aws docker_settings: { enable_docker_access: ("ENABLED" | "DISABLED")?, vpc_only_trusted_accounts: Array[::String]? + }?, + amazon_q_settings: { + status: ("ENABLED" | "DISABLED")?, + q_profile_arn: ::String? }? }, subnet_ids: Array[::String], @@ -2402,6 +2406,22 @@ module Aws }? }? }?, + additional_model_data_sources: Array[ + { + channel_name: ::String, + s3_data_source: { + s3_uri: ::String, + s3_data_type: ("S3Prefix" | "S3Object"), + compression_type: ("None" | "Gzip"), + model_access_config: { + accept_eula: bool + }?, + hub_access_config: { + hub_content_arn: ::String + }? + } + }, + ]?, environment: Hash[::String, ::String]?, model_package_name: ::String?, inference_specification_name: ::String?, @@ -2434,6 +2454,22 @@ module Aws }? }? }?, + additional_model_data_sources: Array[ + { + channel_name: ::String, + s3_data_source: { + s3_uri: ::String, + s3_data_type: ("S3Prefix" | "S3Object"), + compression_type: ("None" | "Gzip"), + model_access_config: { + accept_eula: bool + }?, + hub_access_config: { + hub_content_arn: ::String + }? + } + }, + ]?, environment: Hash[::String, ::String]?, model_package_name: ::String?, inference_specification_name: ::String?, @@ -3268,6 +3304,58 @@ module Aws ) -> _CreateNotebookInstanceLifecycleConfigResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _CreateNotebookInstanceLifecycleConfigResponseSuccess + interface _CreateOptimizationJobResponseSuccess + include ::Seahorse::Client::_ResponseSuccess[Types::CreateOptimizationJobResponse] + def optimization_job_arn: () -> ::String + end + # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/SageMaker/Client.html#create_optimization_job-instance_method + def create_optimization_job: ( + optimization_job_name: ::String, + role_arn: ::String, + model_source: { + s3: { + s3_uri: ::String?, + model_access_config: { + accept_eula: bool + }? + }? 
+ }, + deployment_instance_type: ("ml.p4d.24xlarge" | "ml.p4de.24xlarge" | "ml.p5.48xlarge" | "ml.g5.xlarge" | "ml.g5.2xlarge" | "ml.g5.4xlarge" | "ml.g5.8xlarge" | "ml.g5.12xlarge" | "ml.g5.16xlarge" | "ml.g5.24xlarge" | "ml.g5.48xlarge" | "ml.g6.xlarge" | "ml.g6.2xlarge" | "ml.g6.4xlarge" | "ml.g6.8xlarge" | "ml.g6.12xlarge" | "ml.g6.16xlarge" | "ml.g6.24xlarge" | "ml.g6.48xlarge" | "ml.inf2.xlarge" | "ml.inf2.8xlarge" | "ml.inf2.24xlarge" | "ml.inf2.48xlarge" | "ml.trn1.2xlarge" | "ml.trn1.32xlarge" | "ml.trn1n.32xlarge"), + ?optimization_environment: Hash[::String, ::String], + optimization_configs: Array[ + { + model_quantization_config: { + image: ::String?, + override_environment: Hash[::String, ::String]? + }?, + model_compilation_config: { + image: ::String?, + override_environment: Hash[::String, ::String]? + }? + }, + ], + output_config: { + kms_key_id: ::String?, + s3_output_location: ::String + }, + stopping_condition: { + max_runtime_in_seconds: ::Integer?, + max_wait_time_in_seconds: ::Integer?, + max_pending_time_in_seconds: ::Integer? + }, + ?tags: Array[ + { + key: ::String, + value: ::String + }, + ], + ?vpc_config: { + security_group_ids: Array[::String], + subnets: Array[::String] + } + ) -> _CreateOptimizationJobResponseSuccess + | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _CreateOptimizationJobResponseSuccess + interface _CreatePipelineResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::CreatePipelineResponse] def pipeline_arn: () -> ::String @@ -4450,6 +4538,12 @@ module Aws ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] + # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/SageMaker/Client.html#delete_optimization_job-instance_method + def delete_optimization_job: ( + optimization_job_name: ::String + ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] + | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] + interface _DeletePipelineResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::DeletePipelineResponse] def pipeline_arn: () -> ::String @@ -5567,6 +5661,32 @@ module Aws ) -> _DescribeNotebookInstanceLifecycleConfigResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _DescribeNotebookInstanceLifecycleConfigResponseSuccess + interface _DescribeOptimizationJobResponseSuccess + include ::Seahorse::Client::_ResponseSuccess[Types::DescribeOptimizationJobResponse] + def optimization_job_arn: () -> ::String + def optimization_job_status: () -> ("INPROGRESS" | "COMPLETED" | "FAILED" | "STARTING" | "STOPPING" | "STOPPED") + def optimization_start_time: () -> ::Time + def optimization_end_time: () -> ::Time + def creation_time: () -> ::Time + def last_modified_time: () -> ::Time + def failure_reason: () -> ::String + def optimization_job_name: () -> ::String + def model_source: () -> Types::OptimizationJobModelSource + def optimization_environment: () -> ::Hash[::String, ::String] + def deployment_instance_type: () -> ("ml.p4d.24xlarge" | "ml.p4de.24xlarge" | "ml.p5.48xlarge" | "ml.g5.xlarge" | "ml.g5.2xlarge" | "ml.g5.4xlarge" | "ml.g5.8xlarge" | "ml.g5.12xlarge" | "ml.g5.16xlarge" | "ml.g5.24xlarge" | "ml.g5.48xlarge" | "ml.g6.xlarge" | "ml.g6.2xlarge" | "ml.g6.4xlarge" | "ml.g6.8xlarge" | "ml.g6.12xlarge" | "ml.g6.16xlarge" | "ml.g6.24xlarge" | 
"ml.g6.48xlarge" | "ml.inf2.xlarge" | "ml.inf2.8xlarge" | "ml.inf2.24xlarge" | "ml.inf2.48xlarge" | "ml.trn1.2xlarge" | "ml.trn1.32xlarge" | "ml.trn1n.32xlarge") + def optimization_configs: () -> ::Array[Types::OptimizationConfig] + def output_config: () -> Types::OptimizationJobOutputConfig + def optimization_output: () -> Types::OptimizationOutput + def role_arn: () -> ::String + def stopping_condition: () -> Types::StoppingCondition + def vpc_config: () -> Types::OptimizationVpcConfig + end + # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/SageMaker/Client.html#describe_optimization_job-instance_method + def describe_optimization_job: ( + optimization_job_name: ::String + ) -> _DescribeOptimizationJobResponseSuccess + | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _DescribeOptimizationJobResponseSuccess + interface _DescribePipelineResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::DescribePipelineResponse] def pipeline_arn: () -> ::String @@ -7066,6 +7186,27 @@ module Aws ) -> _ListNotebookInstancesResponseSuccess | (?Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListNotebookInstancesResponseSuccess + interface _ListOptimizationJobsResponseSuccess + include ::Seahorse::Client::_ResponseSuccess[Types::ListOptimizationJobsResponse] + def optimization_job_summaries: () -> ::Array[Types::OptimizationJobSummary] + def next_token: () -> ::String + end + # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/SageMaker/Client.html#list_optimization_jobs-instance_method + def list_optimization_jobs: ( + ?next_token: ::String, + ?max_results: ::Integer, + ?creation_time_after: ::Time, + ?creation_time_before: ::Time, + ?last_modified_time_after: ::Time, + ?last_modified_time_before: ::Time, + ?optimization_contains: ::String, + ?name_contains: ::String, + ?status_equals: ("INPROGRESS" | "COMPLETED" | "FAILED" | "STARTING" | "STOPPING" | "STOPPED"), + ?sort_by: ("Name" | "CreationTime" | "Status"), + ?sort_order: ("Ascending" | "Descending") + ) -> _ListOptimizationJobsResponseSuccess + | (?Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListOptimizationJobsResponseSuccess + interface _ListPipelineExecutionStepsResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::ListPipelineExecutionStepsResponse] def pipeline_execution_steps: () -> ::Array[Types::PipelineExecutionStep] @@ -7722,6 +7863,12 @@ module Aws ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] + # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/SageMaker/Client.html#stop_optimization_job-instance_method + def stop_optimization_job: ( + optimization_job_name: ::String + ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] + | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] + interface _StopPipelineExecutionResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::StopPipelineExecutionResponse] def pipeline_execution_arn: () -> ::String @@ -8107,6 +8254,10 @@ module Aws docker_settings: { enable_docker_access: ("ENABLED" | "DISABLED")?, vpc_only_trusted_accounts: Array[::String]? + }?, + amazon_q_settings: { + status: ("ENABLED" | "DISABLED")?, + q_profile_arn: ::String? }? 
                           },
                           ?app_security_group_management: ("Service" | "Customer"),
diff --git a/gems/aws-sdk-sagemaker/sig/types.rbs b/gems/aws-sdk-sagemaker/sig/types.rbs
index f1eda560289..d5bfe7762f2 100644
--- a/gems/aws-sdk-sagemaker/sig/types.rbs
+++ b/gems/aws-sdk-sagemaker/sig/types.rbs
@@ -61,6 +61,12 @@ module Aws::SageMaker
       SENSITIVE: []
     end

+    class AdditionalModelDataSource
+      attr_accessor channel_name: ::String
+      attr_accessor s3_data_source: Types::S3ModelDataSource
+      SENSITIVE: []
+    end
+
     class AdditionalS3DataSource
       attr_accessor s3_data_type: ("S3Object" | "S3Prefix")
       attr_accessor s3_uri: ::String
@@ -126,6 +132,12 @@ module Aws::SageMaker
       SENSITIVE: []
     end

+    class AmazonQSettings
+      attr_accessor status: ("ENABLED" | "DISABLED")
+      attr_accessor q_profile_arn: ::String
+      SENSITIVE: []
+    end
+
     class AnnotationConsolidationConfig
       attr_accessor annotation_consolidation_lambda_arn: ::String
       SENSITIVE: []
     end
@@ -858,6 +870,7 @@ module Aws::SageMaker
       attr_accessor mode: ("SingleModel" | "MultiModel")
       attr_accessor model_data_url: ::String
       attr_accessor model_data_source: Types::ModelDataSource
+      attr_accessor additional_model_data_sources: ::Array[Types::AdditionalModelDataSource]
       attr_accessor environment: ::Hash[::String, ::String]
       attr_accessor model_package_name: ::String
       attr_accessor inference_specification_name: ::String
@@ -1611,6 +1624,25 @@ module Aws::SageMaker
       SENSITIVE: []
     end

+    class CreateOptimizationJobRequest
+      attr_accessor optimization_job_name: ::String
+      attr_accessor role_arn: ::String
+      attr_accessor model_source: Types::OptimizationJobModelSource
+      attr_accessor deployment_instance_type: ("ml.p4d.24xlarge" | "ml.p4de.24xlarge" | "ml.p5.48xlarge" | "ml.g5.xlarge" | "ml.g5.2xlarge" | "ml.g5.4xlarge" | "ml.g5.8xlarge" | "ml.g5.12xlarge" | "ml.g5.16xlarge" | "ml.g5.24xlarge" | "ml.g5.48xlarge" | "ml.g6.xlarge" | "ml.g6.2xlarge" | "ml.g6.4xlarge" | "ml.g6.8xlarge" | "ml.g6.12xlarge" | "ml.g6.16xlarge" | "ml.g6.24xlarge" | "ml.g6.48xlarge" | "ml.inf2.xlarge" | "ml.inf2.8xlarge" | "ml.inf2.24xlarge" | "ml.inf2.48xlarge" | "ml.trn1.2xlarge" | "ml.trn1.32xlarge" | "ml.trn1n.32xlarge")
+      attr_accessor optimization_environment: ::Hash[::String, ::String]
+      attr_accessor optimization_configs: ::Array[Types::OptimizationConfig]
+      attr_accessor output_config: Types::OptimizationJobOutputConfig
+      attr_accessor stopping_condition: Types::StoppingCondition
+      attr_accessor tags: ::Array[Types::Tag]
+      attr_accessor vpc_config: Types::OptimizationVpcConfig
+      SENSITIVE: []
+    end
+
+    class CreateOptimizationJobResponse
+      attr_accessor optimization_job_arn: ::String
+      SENSITIVE: []
+    end
+
     class CreatePipelineRequest
       attr_accessor pipeline_name: ::String
       attr_accessor pipeline_display_name: ::String
@@ -2303,6 +2335,11 @@ module Aws::SageMaker
       SENSITIVE: []
     end

+    class DeleteOptimizationJobRequest
+      attr_accessor optimization_job_name: ::String
+      SENSITIVE: []
+    end
+
     class DeletePipelineRequest
       attr_accessor pipeline_name: ::String
       attr_accessor client_request_token: ::String
@@ -3453,6 +3490,32 @@ module Aws::SageMaker
       SENSITIVE: []
     end

+    class DescribeOptimizationJobRequest
+      attr_accessor optimization_job_name: ::String
+      SENSITIVE: []
+    end
+
+    class DescribeOptimizationJobResponse
+      attr_accessor optimization_job_arn: ::String
+      attr_accessor optimization_job_status: ("INPROGRESS" | "COMPLETED" | "FAILED" | "STARTING" | "STOPPING" | "STOPPED")
+      attr_accessor optimization_start_time: ::Time
+      attr_accessor optimization_end_time: ::Time
+      attr_accessor creation_time: ::Time
+      attr_accessor last_modified_time: ::Time
+      attr_accessor failure_reason: ::String
+      attr_accessor optimization_job_name: ::String
+      attr_accessor model_source: Types::OptimizationJobModelSource
+      attr_accessor optimization_environment: ::Hash[::String, ::String]
+      attr_accessor deployment_instance_type: ("ml.p4d.24xlarge" | "ml.p4de.24xlarge" | "ml.p5.48xlarge" | "ml.g5.xlarge" | "ml.g5.2xlarge" | "ml.g5.4xlarge" | "ml.g5.8xlarge" | "ml.g5.12xlarge" | "ml.g5.16xlarge" | "ml.g5.24xlarge" | "ml.g5.48xlarge" | "ml.g6.xlarge" | "ml.g6.2xlarge" | "ml.g6.4xlarge" | "ml.g6.8xlarge" | "ml.g6.12xlarge" | "ml.g6.16xlarge" | "ml.g6.24xlarge" | "ml.g6.48xlarge" | "ml.inf2.xlarge" | "ml.inf2.8xlarge" | "ml.inf2.24xlarge" | "ml.inf2.48xlarge" | "ml.trn1.2xlarge" | "ml.trn1.32xlarge" | "ml.trn1n.32xlarge")
+      attr_accessor optimization_configs: ::Array[Types::OptimizationConfig]
+      attr_accessor output_config: Types::OptimizationJobOutputConfig
+      attr_accessor optimization_output: Types::OptimizationOutput
+      attr_accessor role_arn: ::String
+      attr_accessor stopping_condition: Types::StoppingCondition
+      attr_accessor vpc_config: Types::OptimizationVpcConfig
+      SENSITIVE: []
+    end
+
     class DescribePipelineDefinitionForExecutionRequest
       attr_accessor pipeline_execution_arn: ::String
       SENSITIVE: []
     end
@@ -3885,6 +3948,7 @@ module Aws::SageMaker
       attr_accessor r_studio_server_pro_domain_settings: Types::RStudioServerProDomainSettings
       attr_accessor execution_role_identity_config: ("USER_PROFILE_NAME" | "DISABLED")
       attr_accessor docker_settings: Types::DockerSettings
+      attr_accessor amazon_q_settings: Types::AmazonQSettings
       SENSITIVE: []
     end
@@ -3893,6 +3957,7 @@ module Aws::SageMaker
       attr_accessor execution_role_identity_config: ("USER_PROFILE_NAME" | "DISABLED")
       attr_accessor security_group_ids: ::Array[::String]
       attr_accessor docker_settings: Types::DockerSettings
+      attr_accessor amazon_q_settings: Types::AmazonQSettings
       SENSITIVE: []
     end
@@ -6134,6 +6199,27 @@ module Aws::SageMaker
       SENSITIVE: []
     end

+    class ListOptimizationJobsRequest
+      attr_accessor next_token: ::String
+      attr_accessor max_results: ::Integer
+      attr_accessor creation_time_after: ::Time
+      attr_accessor creation_time_before: ::Time
+      attr_accessor last_modified_time_after: ::Time
+      attr_accessor last_modified_time_before: ::Time
+      attr_accessor optimization_contains: ::String
+      attr_accessor name_contains: ::String
+      attr_accessor status_equals: ("INPROGRESS" | "COMPLETED" | "FAILED" | "STARTING" | "STOPPING" | "STOPPED")
+      attr_accessor sort_by: ("Name" | "CreationTime" | "Status")
+      attr_accessor sort_order: ("Ascending" | "Descending")
+      SENSITIVE: []
+    end
+
+    class ListOptimizationJobsResponse
+      attr_accessor optimization_job_summaries: ::Array[Types::OptimizationJobSummary]
+      attr_accessor next_token: ::String
+      SENSITIVE: []
+    end
+
     class ListPipelineExecutionStepsRequest
       attr_accessor pipeline_execution_arn: ::String
       attr_accessor next_token: ::String
@@ -6636,6 +6722,12 @@ module Aws::SageMaker
       SENSITIVE: []
     end

+    class ModelCompilationConfig
+      attr_accessor image: ::String
+      attr_accessor override_environment: ::Hash[::String, ::String]
+      SENSITIVE: []
+    end
+
     class ModelConfiguration
       attr_accessor inference_specification_name: ::String
       attr_accessor environment_parameters: ::Array[Types::EnvironmentParameter]
@@ -6937,6 +7029,12 @@ module Aws::SageMaker
       SENSITIVE: []
     end

+    class ModelQuantizationConfig
+      attr_accessor image: ::String
+      attr_accessor override_environment: ::Hash[::String, ::String]
+      SENSITIVE: []
+    end
+
     class ModelRegisterSettings
       attr_accessor status: ("ENABLED" | "DISABLED")
       attr_accessor cross_account_model_register_role_arn: ::String
@@ -7291,6 +7389,66 @@ module Aws::SageMaker
       SENSITIVE: []
     end

+    class OptimizationConfig
+      attr_accessor model_quantization_config: Types::ModelQuantizationConfig
+      attr_accessor model_compilation_config: Types::ModelCompilationConfig
+      attr_accessor unknown: untyped
+      SENSITIVE: []
+
+      class ModelQuantizationConfig < OptimizationConfig
+      end
+      class ModelCompilationConfig < OptimizationConfig
+      end
+      class Unknown < OptimizationConfig
+      end
+    end
+
+    class OptimizationJobModelSource
+      attr_accessor s3: Types::OptimizationJobModelSourceS3
+      SENSITIVE: []
+    end
+
+    class OptimizationJobModelSourceS3
+      attr_accessor s3_uri: ::String
+      attr_accessor model_access_config: Types::OptimizationModelAccessConfig
+      SENSITIVE: []
+    end
+
+    class OptimizationJobOutputConfig
+      attr_accessor kms_key_id: ::String
+      attr_accessor s3_output_location: ::String
+      SENSITIVE: []
+    end
+
+    class OptimizationJobSummary
+      attr_accessor optimization_job_name: ::String
+      attr_accessor optimization_job_arn: ::String
+      attr_accessor creation_time: ::Time
+      attr_accessor optimization_job_status: ("INPROGRESS" | "COMPLETED" | "FAILED" | "STARTING" | "STOPPING" | "STOPPED")
+      attr_accessor optimization_start_time: ::Time
+      attr_accessor optimization_end_time: ::Time
+      attr_accessor last_modified_time: ::Time
+      attr_accessor deployment_instance_type: ("ml.p4d.24xlarge" | "ml.p4de.24xlarge" | "ml.p5.48xlarge" | "ml.g5.xlarge" | "ml.g5.2xlarge" | "ml.g5.4xlarge" | "ml.g5.8xlarge" | "ml.g5.12xlarge" | "ml.g5.16xlarge" | "ml.g5.24xlarge" | "ml.g5.48xlarge" | "ml.g6.xlarge" | "ml.g6.2xlarge" | "ml.g6.4xlarge" | "ml.g6.8xlarge" | "ml.g6.12xlarge" | "ml.g6.16xlarge" | "ml.g6.24xlarge" | "ml.g6.48xlarge" | "ml.inf2.xlarge" | "ml.inf2.8xlarge" | "ml.inf2.24xlarge" | "ml.inf2.48xlarge" | "ml.trn1.2xlarge" | "ml.trn1.32xlarge" | "ml.trn1n.32xlarge")
+      attr_accessor optimization_types: ::Array[::String]
+      SENSITIVE: []
+    end
+
+    class OptimizationModelAccessConfig
+      attr_accessor accept_eula: bool
+      SENSITIVE: []
+    end
+
+    class OptimizationOutput
+      attr_accessor recommended_inference_image: ::String
+      SENSITIVE: []
+    end
+
+    class OptimizationVpcConfig
+      attr_accessor security_group_ids: ::Array[::String]
+      attr_accessor subnets: ::Array[::String]
+      SENSITIVE: []
+    end
+
     class OutputConfig
       attr_accessor s3_output_location: ::String
       attr_accessor target_device: ("lambda" | "ml_m4" | "ml_m5" | "ml_m6g" | "ml_c4" | "ml_c5" | "ml_c6g" | "ml_p2" | "ml_p3" | "ml_g4dn" | "ml_inf1" | "ml_inf2" | "ml_trn1" | "ml_eia2" | "jetson_tx1" | "jetson_tx2" | "jetson_nano" | "jetson_xavier" | "rasp3b" | "rasp4b" | "imx8qm" | "deeplens" | "rk3399" | "rk3288" | "aisage" | "sbe_c" | "qcs605" | "qcs603" | "sitara_am57x" | "amba_cv2" | "amba_cv22" | "amba_cv25" | "x86_win32" | "x86_win64" | "coreml" | "jacinto_tda4vm" | "imx8mplus")
@@ -8501,6 +8659,11 @@ module Aws::SageMaker
       SENSITIVE: []
     end

+    class StopOptimizationJobRequest
+      attr_accessor optimization_job_name: ::String
+      SENSITIVE: []
+    end
+
     class StopPipelineExecutionRequest
       attr_accessor pipeline_execution_arn: ::String
       attr_accessor client_request_token: ::String
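# A minimal usage sketch of the optimization-job operations whose signatures
# are added in this patch (list_optimization_jobs, describe_optimization_job,
# stop_optimization_job). The region, filter values, and output formatting
# are assumptions for illustration; method and field names come from the
# signatures above.
require "aws-sdk-sagemaker"

client = Aws::SageMaker::Client.new(region: "us-east-1")

# List optimization jobs that are still running, newest first.
resp = client.list_optimization_jobs(
  status_equals: "INPROGRESS",
  sort_by: "CreationTime",
  sort_order: "Descending"
)

resp.optimization_job_summaries.each do |summary|
  # describe_optimization_job returns the job status, timestamps, the model
  # source, and, for completed jobs, the recommended inference image.
  job = client.describe_optimization_job(optimization_job_name: summary.optimization_job_name)
  puts "#{job.optimization_job_name}: #{job.optimization_job_status}"

  # stop_optimization_job returns an empty structure, per the signature above.
  client.stop_optimization_job(optimization_job_name: job.optimization_job_name) if job.optimization_job_status == "INPROGRESS"
end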