From 07e1c5ff2d3f5ca39e86ab300744f4f988d53cac Mon Sep 17 00:00:00 2001
From: aws-sdk-go-automation <43143561+aws-sdk-go-automation@users.noreply.github.com>
Date: Wed, 6 Oct 2021 11:25:50 -0700
Subject: [PATCH] Release v1.40.57 (2021-10-06) (#4125)

Release v1.40.57 (2021-10-06)
===

### Service Client Updates
* `service/amplifybackend`: Updates service API and documentation
* `service/fsx`: Updates service API and documentation
* `service/kendra`: Updates service API and documentation
  * Amazon Kendra now supports integration with AWS SSO
* `service/sagemaker`: Updates service API and documentation
  * This release adds a new TrainingInputMode FastFile for SageMaker Training APIs.
---
 CHANGELOG.md                                  |  11 +
 aws/endpoints/defaults.go                     |  73 +++++
 aws/version.go                                |   2 +-
 .../apis/amplifybackend/2020-08-11/api-2.json |   8 +
 .../amplifybackend/2020-08-11/docs-2.json     |   1 +
 models/apis/fsx/2018-03-01/api-2.json         |  15 +-
 models/apis/fsx/2018-03-01/docs-2.json        |  16 +-
 models/apis/kendra/2019-02-03/api-2.json      |  23 +-
 models/apis/kendra/2019-02-03/docs-2.json     |  64 +++--
 models/apis/sagemaker/2017-07-24/api-2.json   |   3 +-
 models/apis/sagemaker/2017-07-24/docs-2.json  |  28 +-
 models/endpoints/endpoints.json               |  69 +++++
 service/amplifybackend/api.go                 |   8 +
 service/fsx/api.go                            |  79 ++++--
 service/kendra/api.go                         | 232 ++++++++++++----
 service/sagemaker/api.go                      | 250 +++++++++++++-----
 16 files changed, 701 insertions(+), 181 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8b4f760f4c0..cf74a385caf 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,14 @@
+Release v1.40.57 (2021-10-06)
+===
+
+### Service Client Updates
+* `service/amplifybackend`: Updates service API and documentation
+* `service/fsx`: Updates service API and documentation
+* `service/kendra`: Updates service API and documentation
+  * Amazon Kendra now supports integration with AWS SSO
+* `service/sagemaker`: Updates service API and documentation
+  * This release adds a new TrainingInputMode FastFile for SageMaker Training APIs.
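As an editorial illustration of the SageMaker change, here is a minimal aws-sdk-go sketch that selects the new FastFile input mode when starting a training job. The job name, role ARN, image URI, bucket paths, and instance settings are placeholder assumptions, not values from this patch:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	svc := sagemaker.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))

	// FastFile streams S3 objects on demand while exposing them to the
	// algorithm as local files, instead of downloading everything up front
	// (File) or piping records (Pipe).
	out, err := svc.CreateTrainingJob(&sagemaker.CreateTrainingJobInput{
		TrainingJobName: aws.String("example-fastfile-job"), // placeholder name
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/ExampleSageMakerRole"),
		AlgorithmSpecification: &sagemaker.AlgorithmSpecification{
			TrainingImage:     aws.String("123456789012.dkr.ecr.us-east-1.amazonaws.com/example:latest"),
			TrainingInputMode: aws.String(sagemaker.TrainingInputModeFastFile), // new in v1.40.57
		},
		InputDataConfig: []*sagemaker.Channel{{
			ChannelName: aws.String("train"),
			DataSource: &sagemaker.DataSource{
				S3DataSource: &sagemaker.S3DataSource{
					S3DataType: aws.String(sagemaker.S3DataTypeS3Prefix),
					S3Uri:      aws.String("s3://example-bucket/train/"),
				},
			},
		}},
		OutputDataConfig: &sagemaker.OutputDataConfig{
			S3OutputPath: aws.String("s3://example-bucket/output/"),
		},
		ResourceConfig: &sagemaker.ResourceConfig{
			InstanceCount:  aws.Int64(1),
			InstanceType:   aws.String(sagemaker.TrainingInstanceTypeMlM5Xlarge),
			VolumeSizeInGB: aws.Int64(50),
		},
		StoppingCondition: &sagemaker.StoppingCondition{
			MaxRuntimeInSeconds: aws.Int64(3600),
		},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(aws.StringValue(out.TrainingJobArn))
}
```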
+
 Release v1.40.56 (2021-10-05)
 ===
diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go
index 4de91f08ccf..b91240af426 100644
--- a/aws/endpoints/defaults.go
+++ b/aws/endpoints/defaults.go
@@ -2075,6 +2075,30 @@ var awsPartition = partition{
 				"us-west-2": endpoint{},
 			},
 		},
+		"databrew": service{
+
+			Endpoints: endpoints{
+				"af-south-1":     endpoint{},
+				"ap-east-1":      endpoint{},
+				"ap-northeast-1": endpoint{},
+				"ap-northeast-2": endpoint{},
+				"ap-south-1":     endpoint{},
+				"ap-southeast-1": endpoint{},
+				"ap-southeast-2": endpoint{},
+				"ca-central-1":   endpoint{},
+				"eu-central-1":   endpoint{},
+				"eu-north-1":     endpoint{},
+				"eu-south-1":     endpoint{},
+				"eu-west-1":      endpoint{},
+				"eu-west-2":      endpoint{},
+				"eu-west-3":      endpoint{},
+				"sa-east-1":      endpoint{},
+				"us-east-1":      endpoint{},
+				"us-east-2":      endpoint{},
+				"us-west-1":      endpoint{},
+				"us-west-2":      endpoint{},
+			},
+		},
 		"dataexchange": service{
 
 			Endpoints: endpoints{
@@ -4245,6 +4269,36 @@ var awsPartition = partition{
 				"us-west-2": endpoint{},
 			},
 		},
+		"kendra": service{
+
+			Endpoints: endpoints{
+				"ap-southeast-1": endpoint{},
+				"ap-southeast-2": endpoint{},
+				"ca-central-1":   endpoint{},
+				"eu-west-1":      endpoint{},
+				"fips-us-east-1": endpoint{
+					Hostname: "kendra-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				"fips-us-east-2": endpoint{
+					Hostname: "kendra-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+				},
+				"fips-us-west-2": endpoint{
+					Hostname: "kendra-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+				},
+				"us-east-1": endpoint{},
+				"us-east-2": endpoint{},
+				"us-west-2": endpoint{},
+			},
+		},
 		"kinesis": service{
 
 			Endpoints: endpoints{
@@ -5314,6 +5368,12 @@ var awsPartition = partition{
 					Region: "eu-west-3",
 				},
 			},
+			"sa-east-1": endpoint{
+				Hostname: "oidc.sa-east-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "sa-east-1",
+				},
+			},
 			"us-east-1": endpoint{
 				Hostname: "oidc.us-east-1.amazonaws.com",
 				CredentialScope: credentialScope{
@@ -8373,6 +8433,13 @@ var awscnPartition = partition{
 				"cn-northwest-1": endpoint{},
 			},
 		},
+		"databrew": service{
+
+			Endpoints: endpoints{
+				"cn-north-1":     endpoint{},
+				"cn-northwest-1": endpoint{},
+			},
+		},
 		"dax": service{
 
 			Endpoints: endpoints{
@@ -9616,6 +9683,12 @@ var awsusgovPartition = partition{
 				"us-gov-west-1": endpoint{},
 			},
 		},
+		"databrew": service{
+
+			Endpoints: endpoints{
+				"us-gov-west-1": endpoint{},
+			},
+		},
 		"datasync": service{
 
 			Endpoints: endpoints{
diff --git a/aws/version.go b/aws/version.go
index 1f0662dade5..c360ba46163 100644
--- a/aws/version.go
+++ b/aws/version.go
@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "1.40.56"
+const SDKVersion = "1.40.57"
diff --git a/models/apis/amplifybackend/2020-08-11/api-2.json b/models/apis/amplifybackend/2020-08-11/api-2.json
index b6a98a469e2..d7d5cc81a1e 100644
--- a/models/apis/amplifybackend/2020-08-11/api-2.json
+++ b/models/apis/amplifybackend/2020-08-11/api-2.json
@@ -2097,6 +2097,10 @@
     "GetBackendRespObj" : {
       "type" : "structure",
       "members" : {
+        "AmplifyFeatureFlags" : {
+          "shape" : "__string",
+          "locationName" : "amplifyFeatureFlags"
+        },
         "AmplifyMetaConfig" : {
           "shape" : "__string",
           "locationName" : "amplifyMetaConfig"
@@ -2127,6 +2131,10 @@
     "GetBackendResponse" : {
       "type" : "structure",
       "members" : {
+        "AmplifyFeatureFlags" : {
+          "shape" : "__string",
+          "locationName" : "amplifyFeatureFlags"
+        },
"AmplifyMetaConfig" : { "shape" : "__string", "locationName" : "amplifyMetaConfig" diff --git a/models/apis/amplifybackend/2020-08-11/docs-2.json b/models/apis/amplifybackend/2020-08-11/docs-2.json index eea578b8391..799944a2c6a 100644 --- a/models/apis/amplifybackend/2020-08-11/docs-2.json +++ b/models/apis/amplifybackend/2020-08-11/docs-2.json @@ -621,6 +621,7 @@ "GetBackendAuthRespObj$Error" : "

If the request fails, this error is returned.

", "GetBackendAuthRespObj$ResourceName" : "

The name of this resource.

", "GetBackendReqObj$BackendEnvironmentName" : "

The name of the backend environment.

", + "GetBackendRespObj$AmplifyFeatureFlags" : "

A stringified version of the cli.json file for your Amplify project.
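As a hedged sketch of reading this new field with the generated Go client (the app ID and environment name are placeholders):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/amplifybackend"
)

func main() {
	svc := amplifybackend.New(session.Must(session.NewSession()))

	out, err := svc.GetBackend(&amplifybackend.GetBackendInput{
		AppId:                  aws.String("d1a2b3c4example"), // placeholder app ID
		BackendEnvironmentName: aws.String("staging"),
	})
	if err != nil {
		panic(err)
	}
	// AmplifyFeatureFlags carries the stringified cli.json for the project.
	fmt.Println(aws.StringValue(out.AmplifyFeatureFlags))
}
```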

", "GetBackendRespObj$AmplifyMetaConfig" : "

A stringified version of the current configs for your Amplify project.

", "GetBackendRespObj$AppId" : "

The app ID.

", "GetBackendRespObj$AppName" : "

The name of the app.

", diff --git a/models/apis/fsx/2018-03-01/api-2.json b/models/apis/fsx/2018-03-01/api-2.json index f8bae20e685..553605bb7a0 100644 --- a/models/apis/fsx/2018-03-01/api-2.json +++ b/models/apis/fsx/2018-03-01/api-2.json @@ -847,7 +847,8 @@ "WindowsConfiguration":{"shape":"CreateFileSystemWindowsConfiguration"}, "LustreConfiguration":{"shape":"CreateFileSystemLustreConfiguration"}, "StorageType":{"shape":"StorageType"}, - "KmsKeyId":{"shape":"KmsKeyId"} + "KmsKeyId":{"shape":"KmsKeyId"}, + "FileSystemTypeVersion":{"shape":"FileSystemTypeVersion"} } }, "CreateFileSystemFromBackupResponse":{ @@ -913,7 +914,8 @@ "KmsKeyId":{"shape":"KmsKeyId"}, "WindowsConfiguration":{"shape":"CreateFileSystemWindowsConfiguration"}, "LustreConfiguration":{"shape":"CreateFileSystemLustreConfiguration"}, - "OntapConfiguration":{"shape":"CreateFileSystemOntapConfiguration"} + "OntapConfiguration":{"shape":"CreateFileSystemOntapConfiguration"}, + "FileSystemTypeVersion":{"shape":"FileSystemTypeVersion"} } }, "CreateFileSystemResponse":{ @@ -1518,7 +1520,8 @@ "WindowsConfiguration":{"shape":"WindowsFileSystemConfiguration"}, "LustreConfiguration":{"shape":"LustreFileSystemConfiguration"}, "AdministrativeActions":{"shape":"AdministrativeActions"}, - "OntapConfiguration":{"shape":"OntapFileSystemConfiguration"} + "OntapConfiguration":{"shape":"OntapFileSystemConfiguration"}, + "FileSystemTypeVersion":{"shape":"FileSystemTypeVersion"} } }, "FileSystemAdministratorsGroupName":{ @@ -1596,6 +1599,12 @@ "ONTAP" ] }, + "FileSystemTypeVersion":{ + "type":"string", + "max":20, + "min":1, + "pattern":"^[0-9](\\.[0-9]*)*$" + }, "FileSystems":{ "type":"list", "member":{"shape":"FileSystem"}, diff --git a/models/apis/fsx/2018-03-01/docs-2.json b/models/apis/fsx/2018-03-01/docs-2.json index 5d59649afb8..b7f63bae463 100644 --- a/models/apis/fsx/2018-03-01/docs-2.json +++ b/models/apis/fsx/2018-03-01/docs-2.json @@ -756,9 +756,9 @@ "DnsIps": { "base": null, "refs": { - "SelfManagedActiveDirectoryAttributes$DnsIps": "

A list of up to two IP addresses of DNS servers or domain controllers in the self-managed AD directory.

", - "SelfManagedActiveDirectoryConfiguration$DnsIps": "

A list of up to two IP addresses of DNS servers or domain controllers in the self-managed AD directory.

", - "SelfManagedActiveDirectoryConfigurationUpdates$DnsIps": "

A list of up to two IP addresses of DNS servers or domain controllers in the self-managed AD directory.

" + "SelfManagedActiveDirectoryAttributes$DnsIps": "

A list of up to three IP addresses of DNS servers or domain controllers in the self-managed AD directory.

", + "SelfManagedActiveDirectoryConfiguration$DnsIps": "

A list of up to three IP addresses of DNS servers or domain controllers in the self-managed AD directory.

", + "SelfManagedActiveDirectoryConfigurationUpdates$DnsIps": "

A list of up to three IP addresses of DNS servers or domain controllers in the self-managed AD directory.

" } }, "DriveCacheType": { @@ -915,6 +915,14 @@ "FileSystem$FileSystemType": "

The type of Amazon FSx file system, which can be LUSTRE, WINDOWS, or ONTAP.

" } }, + "FileSystemTypeVersion": { + "base": null, + "refs": { + "CreateFileSystemFromBackupRequest$FileSystemTypeVersion": "

Sets the version for the Amazon FSx for Lustre file system you're creating from a backup. Valid values are 2.10 and 2.12.

You don't need to specify FileSystemTypeVersion because it will be applied using the backup's FileSystemTypeVersion setting. If you choose to specify FileSystemTypeVersion when creating from backup, the value must match the backup's FileSystemTypeVersion setting.

", + "CreateFileSystemRequest$FileSystemTypeVersion": "

Sets the version of the Amazon FSx for Lustre file system you're creating. Valid values are 2.10 and 2.12.

Default value is 2.10.
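An illustrative sketch of opting in to Lustre 2.12 through this new field; the subnet ID, capacity, and throughput values below are assumptions:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/fsx"
)

func main() {
	svc := fsx.New(session.Must(session.NewSession()))

	// Omitting FileSystemTypeVersion keeps the documented default of "2.10".
	out, err := svc.CreateFileSystem(&fsx.CreateFileSystemInput{
		FileSystemType:        aws.String(fsx.FileSystemTypeLustre),
		FileSystemTypeVersion: aws.String("2.12"),
		StorageCapacity:       aws.Int64(1200),
		SubnetIds:             []*string{aws.String("subnet-0123456789abcdef0")},
		LustreConfiguration: &fsx.CreateFileSystemLustreConfiguration{
			DeploymentType:           aws.String(fsx.LustreDeploymentTypePersistent1),
			PerUnitStorageThroughput: aws.Int64(200),
		},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(aws.StringValue(out.FileSystem.FileSystemId))
}
```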

", + "FileSystem$FileSystemTypeVersion": "

The version of your Amazon FSx for Lustre file system, either 2.10 or 2.12.

" + } + }, "FileSystems": { "base": "

A list of file systems.

", "refs": { @@ -1356,7 +1364,7 @@ } }, "SelfManagedActiveDirectoryConfiguration": { - "base": "

The configuration that Amazon FSx uses to join a Amazon FSx for Windows File Server file system or an ONTAP storage virtual machine (SVM) to a self-managed (including on-premises) Microsoft Active Directory (AD) directory. For more information, see Using Amazon FSx with your self-managed Microsoft Active Directory or Managing SVMs.

", + "base": "

The configuration that Amazon FSx uses to join an FSx for Windows File Server file system or an ONTAP storage virtual machine (SVM) to a self-managed (including on-premises) Microsoft Active Directory (AD) directory. For more information, see Using Amazon FSx with your self-managed Microsoft Active Directory or Managing SVMs.

", "refs": { "CreateFileSystemWindowsConfiguration$SelfManagedActiveDirectoryConfiguration": null, "CreateSvmActiveDirectoryConfiguration$SelfManagedActiveDirectoryConfiguration": null diff --git a/models/apis/kendra/2019-02-03/api-2.json b/models/apis/kendra/2019-02-03/api-2.json index 6d1cffbe1dc..f0dd8ccdfe6 100644 --- a/models/apis/kendra/2019-02-03/api-2.json +++ b/models/apis/kendra/2019-02-03/api-2.json @@ -1279,7 +1279,8 @@ }, "Tags":{"shape":"TagList"}, "UserTokenConfigurations":{"shape":"UserTokenConfigurationList"}, - "UserContextPolicy":{"shape":"UserContextPolicy"} + "UserContextPolicy":{"shape":"UserContextPolicy"}, + "UserGroupResolutionConfiguration":{"shape":"UserGroupResolutionConfiguration"} } }, "CreateIndexResponse":{ @@ -1716,7 +1717,8 @@ "ErrorMessage":{"shape":"ErrorMessage"}, "CapacityUnits":{"shape":"CapacityUnitsConfiguration"}, "UserTokenConfigurations":{"shape":"UserTokenConfigurationList"}, - "UserContextPolicy":{"shape":"UserContextPolicy"} + "UserContextPolicy":{"shape":"UserContextPolicy"}, + "UserGroupResolutionConfiguration":{"shape":"UserGroupResolutionConfiguration"} } }, "DescribePrincipalMappingRequest":{ @@ -3657,7 +3659,8 @@ "DocumentMetadataConfigurationUpdates":{"shape":"DocumentMetadataConfigurationList"}, "CapacityUnits":{"shape":"CapacityUnitsConfiguration"}, "UserTokenConfigurations":{"shape":"UserTokenConfigurationList"}, - "UserContextPolicy":{"shape":"UserContextPolicy"} + "UserContextPolicy":{"shape":"UserContextPolicy"}, + "UserGroupResolutionConfiguration":{"shape":"UserGroupResolutionConfiguration"} } }, "UpdateQuerySuggestionsBlockListRequest":{ @@ -3737,6 +3740,20 @@ "USER_TOKEN" ] }, + "UserGroupResolutionConfiguration":{ + "type":"structure", + "required":["UserGroupResolutionMode"], + "members":{ + "UserGroupResolutionMode":{"shape":"UserGroupResolutionMode"} + } + }, + "UserGroupResolutionMode":{ + "type":"string", + "enum":[ + "AWS_SSO", + "NONE" + ] + }, "UserId":{ "type":"string", "max":1024, diff --git a/models/apis/kendra/2019-02-03/docs-2.json b/models/apis/kendra/2019-02-03/docs-2.json index e1db3fc9e8e..83e6ae16f1c 100644 --- a/models/apis/kendra/2019-02-03/docs-2.json +++ b/models/apis/kendra/2019-02-03/docs-2.json @@ -6,7 +6,7 @@ "BatchGetDocumentStatus": "

Returns the indexing status for one or more documents submitted with the BatchPutDocument operation.

When you use the BatchPutDocument operation, documents are indexed asynchronously. You can use the BatchGetDocumentStatus operation to get the current status of a list of documents so that you can determine if they have been successfully indexed.

You can also use the BatchGetDocumentStatus operation to check the status of the BatchDeleteDocument operation. When a document is deleted from the index, Amazon Kendra returns NOT_FOUND as the status.

", "BatchPutDocument": "

Adds one or more documents to an index.

The BatchPutDocument operation enables you to ingest inline documents or a set of documents stored in an Amazon S3 bucket. Use this operation to ingest your text and unstructured text into an index, add custom attributes to the documents, and to attach an access control list to the documents added to the index.

The documents are indexed asynchronously. You can see the progress of the batch using Amazon Web Services CloudWatch. Any error messages related to processing the batch are sent to your Amazon Web Services CloudWatch log.

", "ClearQuerySuggestions": "

Clears existing query suggestions from an index.

This deletes existing suggestions only, not the queries in the query log. After you clear suggestions, Amazon Kendra learns new suggestions based on new queries added to the query log from the time you cleared suggestions. If you do not see any new suggestions, then please allow Amazon Kendra to collect enough queries to learn new suggestions.

", - "CreateDataSource": "

Creates a data source that you use to with an Amazon Kendra index.

You specify a name, data source connector type and description for your data source. You also specify configuration information such as document metadata (author, source URI, and so on) and user context information.

CreateDataSource is a synchronous operation. The operation returns 200 if the data source was successfully created. Otherwise, an exception is raised.

", + "CreateDataSource": "

Creates a data source that you want to use with an Amazon Kendra index.

You specify a name, data source connector type and description for your data source. You also specify configuration information for the data source connector.

CreateDataSource is a synchronous operation. The operation returns 200 if the data source was successfully created. Otherwise, an exception is raised.

", "CreateFaq": "

Creates a new set of frequently asked question (FAQ) questions and answers.

", "CreateIndex": "

Creates a new Amazon Kendra index. Index creation is an asynchronous operation. To determine if index creation has completed, check the Status field returned from a call to DescribeIndex. The Status field is set to ACTIVE when the index is ready to use.

Once the index is active you can index your documents using the BatchPutDocument operation or using one of the supported data sources.
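A minimal sketch of that flow with the generated Go client, assuming a placeholder index name and role ARN:

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kendra"
)

func main() {
	svc := kendra.New(session.Must(session.NewSession()))

	created, err := svc.CreateIndex(&kendra.CreateIndexInput{
		Name:    aws.String("example-index"),
		RoleArn: aws.String("arn:aws:iam::123456789012:role/ExampleKendraRole"),
	})
	if err != nil {
		panic(err)
	}

	// Index creation is asynchronous, so poll DescribeIndex until ACTIVE.
	for {
		desc, err := svc.DescribeIndex(&kendra.DescribeIndexInput{Id: created.Id})
		if err != nil {
			panic(err)
		}
		if aws.StringValue(desc.Status) == kendra.IndexStatusActive {
			fmt.Println("index ready:", aws.StringValue(created.Id))
			return
		}
		time.Sleep(30 * time.Second)
	}
}
```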

", "CreateQuerySuggestionsBlockList": "

Creates a block list to exclude certain queries from suggestions.

Any query that contains words or phrases specified in the block list is blocked or filtered out from being shown as a suggestion.

You need to provide the file location of your block list text file in your S3 bucket. In your text file, enter each block word or phrase on a separate line.

For information on the current quota limits for block lists, see Quotas for Amazon Kendra.

", @@ -33,7 +33,7 @@ "ListQuerySuggestionsBlockLists": "

Lists the block lists used for query suggestions for an index.

For information on the current quota limits for block lists, see Quotas for Amazon Kendra.

", "ListTagsForResource": "

Gets a list of tags associated with a specified resource. Indexes, FAQs, and data sources can have tags associated with them.

", "ListThesauri": "

Lists the Amazon Kendra thesauri associated with an index.

", - "PutPrincipalMapping": "

Maps users to their groups. You can also map sub groups to groups. For example, the group \"Company Intellectual Property Teams\" includes sub groups \"Research\" and \"Engineering\". These sub groups include their own list of users or people who work in these teams. Only users who work in research and engineering, and therefore belong in the intellectual property group, can see top-secret company documents in their search results.

You map users to their groups when you want to filter search results for different users based on their group’s access to documents. For more information on filtering search results for different users, see Filtering on user context.

If more than five PUT actions for a group are currently processing, a validation exception is thrown.

", + "PutPrincipalMapping": "

Maps users to their groups so that you only need to provide the user ID when you issue the query.

You can also map sub groups to groups. For example, the group \"Company Intellectual Property Teams\" includes sub groups \"Research\" and \"Engineering\". These sub groups include their own list of users or people who work in these teams. Only users who work in research and engineering, and therefore belong in the intellectual property group, can see top-secret company documents in their search results.

You map users to their groups when you want to filter search results for different users based on their group’s access to documents. For more information on filtering search results for different users, see Filtering on user context.

If more than five PUT actions for a group are currently processing, a validation exception is thrown.
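A sketch of mapping the example groups above with the Go client; the index ID and member identifiers are placeholders:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kendra"
)

// mapGroup nests the "Research" and "Engineering" sub groups, plus one
// direct user, under a parent group.
func mapGroup(svc *kendra.Kendra) error {
	_, err := svc.PutPrincipalMapping(&kendra.PutPrincipalMappingInput{
		IndexId: aws.String("index-id-example"),
		GroupId: aws.String("Company Intellectual Property Teams"),
		GroupMembers: &kendra.GroupMembers{
			MemberGroups: []*kendra.MemberGroup{
				{GroupId: aws.String("Research")},
				{GroupId: aws.String("Engineering")},
			},
			MemberUsers: []*kendra.MemberUser{
				{UserId: aws.String("user@example.com")},
			},
		},
	})
	return err
}
```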

", "Query": "

Searches an active index. Use this API to search your documents using a query. The Query operation enables you to do faceted search and to filter results based on document attributes.

It also enables you to provide user context that Amazon Kendra uses to enforce document access control in the search results.

Amazon Kendra searches your index for text content and question and answer (FAQ) content. By default the response contains three types of results.

You can specify that the query return only one type of result using the QueryResultTypeFilter parameter.

Each query returns the 100 most relevant results.
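For illustration, a sketch of a query carrying user context (IDs, query text, and the token are placeholders):

```go
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kendra"
)

func search(svc *kendra.Kendra) error {
	out, err := svc.Query(&kendra.QueryInput{
		IndexId:   aws.String("index-id-example"),
		QueryText: aws.String("vacation policy"),
		// Optional user context that Amazon Kendra uses to enforce
		// document access control on the results.
		UserContext: &kendra.UserContext{Token: aws.String("example-jwt-token")},
	})
	if err != nil {
		return err
	}
	for _, item := range out.ResultItems {
		if item.DocumentTitle != nil {
			fmt.Println(aws.StringValue(item.DocumentTitle.Text))
		}
	}
	return nil
}
```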

", "StartDataSourceSyncJob": "

Starts a synchronization job for a data source. If a synchronization job is already in progress, Amazon Kendra returns a ResourceInUseException exception.

", "StopDataSourceSyncJob": "

Stops a running synchronization job. You can't stop a scheduled synchronization job.

", @@ -97,7 +97,7 @@ } }, "AttributeFilter": { - "base": "

Provides filtering the query results based on document attributes.

When you use the AndAllFilters or OrAllFilters, filters you can use 2 layers under the first attribute filter. For example, you can use:

<AndAllFilters>

  1. <OrAllFilters>

  2. <EqualTo>

If you use more than 2 layers, you receive a ValidationException exception with the message \"AttributeFilter cannot have a depth of more than 2.\"

If you use more than 10 attribute filters, you receive a ValidationException exception with the message \"AttributeFilter cannot have a length of more than 10\".

", + "base": "

Provides filtering of the query results based on document attributes.

When you use AndAllFilters or OrAllFilters, you can use a maximum of 2 layers of filters under the first attribute filter. For example, you can use:

<AndAllFilters>

  1. <OrAllFilters>

  2. <EqualTo>

If you use more than 2 layers, you receive a ValidationException exception with the message \"AttributeFilter cannot have a depth of more than 2.\"

If you use more than 10 attribute filters in a given list for AndAllFilters or OrAllFilters, you receive a ValidationException with the message \"AttributeFilter cannot have a length of more than 10\".
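A sketch of a filter that stays within both limits — one OrAllFilters layer and one EqualsTo nested under the top-level AndAllFilters (attribute keys and values are placeholders):

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kendra"
)

// twoLayerFilter nests at most 2 layers under the first attribute filter,
// matching the documented depth limit.
func twoLayerFilter() *kendra.AttributeFilter {
	return &kendra.AttributeFilter{
		AndAllFilters: []*kendra.AttributeFilter{
			{OrAllFilters: []*kendra.AttributeFilter{
				{EqualsTo: &kendra.DocumentAttribute{
					Key:   aws.String("department"),
					Value: &kendra.DocumentAttributeValue{StringValue: aws.String("Research")},
				}},
				{EqualsTo: &kendra.DocumentAttribute{
					Key:   aws.String("department"),
					Value: &kendra.DocumentAttributeValue{StringValue: aws.String("Engineering")},
				}},
			}},
			{EqualsTo: &kendra.DocumentAttribute{
				Key:   aws.String("_language_code"),
				Value: &kendra.DocumentAttributeValue{StringValue: aws.String("en")},
			}},
		},
	}
}
```

The returned value plugs into the AttributeFilter field of QueryInput.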

", "refs": { "AttributeFilter$NotFilter": "

Performs a logical NOT operation on all supplied filters.

", "AttributeFilterList$member": null, @@ -266,7 +266,7 @@ "CreateFaqRequest$ClientToken": "

A token that you provide to identify the request to create a FAQ. Multiple calls to the CreateFaqRequest operation with the same client token will create only one FAQ.

", "CreateIndexRequest$ClientToken": "

A token that you provide to identify the request to create an index. Multiple calls to the CreateIndex operation with the same client token will create only one index.

", "CreateQuerySuggestionsBlockListRequest$ClientToken": "

A token that you provide to identify the request to create a query suggestions block list.

", - "CreateThesaurusRequest$ClientToken": "

A token that you provide to identify the request to create a thesaurus. Multiple calls to the CreateThesaurus operation with the same client token will create only one index.

" + "CreateThesaurusRequest$ClientToken": "

A token that you provide to identify the request to create a thesaurus. Multiple calls to the CreateThesaurus operation with the same client token will create only one thesaurus.

" } }, "ColumnConfiguration": { @@ -849,10 +849,10 @@ "AttributeFilter$EqualsTo": "

Performs an equals operation on two document attributes.

", "AttributeFilter$ContainsAll": "

Returns true when a document contains all of the specified document attributes. This filter is only applicable to StringListValue metadata.

", "AttributeFilter$ContainsAny": "

Returns true when a document contains any of the specified document attributes. This filter is only applicable to StringListValue metadata.

", - "AttributeFilter$GreaterThan": "

Performs a greater than operation on two document attributes. Use with a document attribute of type Integer or Long.

", - "AttributeFilter$GreaterThanOrEquals": "

Performs a greater or equals than operation on two document attributes. Use with a document attribute of type Integer or Long.

", - "AttributeFilter$LessThan": "

Performs a less than operation on two document attributes. Use with a document attribute of type Integer or Long.

", - "AttributeFilter$LessThanOrEquals": "

Performs a less than or equals operation on two document attributes. Use with a document attribute of type Integer or Long.

", + "AttributeFilter$GreaterThan": "

Performs a greater than operation on two document attributes. Use with a document attribute of type Date or Long.

", + "AttributeFilter$GreaterThanOrEquals": "

Performs a greater than or equals operation on two document attributes. Use with a document attribute of type Date or Long.

", + "AttributeFilter$LessThan": "

Performs a less than operation on two document attributes. Use with a document attribute of type Date or Long.

", + "AttributeFilter$LessThanOrEquals": "

Performs a less than or equals operation on two document attributes. Use with a document attribute of type Date or Long.

", "DocumentAttributeList$member": null } }, @@ -1572,7 +1572,7 @@ "MaxResultsIntegerForListPrincipalsRequest": { "base": null, "refs": { - "ListGroupsOlderThanOrderingIdRequest$MaxResults": "

The maximum results shown for a list of groups that are mapped to users before a given ordering or timestamp identifier.

" + "ListGroupsOlderThanOrderingIdRequest$MaxResults": "

The maximum number of returned groups that are mapped to users before a given ordering or timestamp identifier.

" } }, "MaxResultsIntegerForListQuerySuggestionsBlockLists": { @@ -1657,14 +1657,14 @@ "NextToken": { "base": null, "refs": { - "ListDataSourceSyncJobsRequest$NextToken": "

If the result of the previous request to GetDataSourceSyncJobHistory was truncated, include the NextToken to fetch the next set of jobs.

", - "ListDataSourceSyncJobsResponse$NextToken": "

The GetDataSourceSyncJobHistory operation returns a page of vocabularies at a time. The maximum size of the page is set by the MaxResults parameter. If there are more jobs in the list than the page size, Amazon Kendra returns the NextPage token. Include the token in the next request to the GetDataSourceSyncJobHistory operation to return in the next page of jobs.

", + "ListDataSourceSyncJobsRequest$NextToken": "

If the previous response was incomplete (because there is more data to retrieve), Amazon Kendra returns a pagination token in the response. You can use this pagination token to retrieve the next set of jobs.

", + "ListDataSourceSyncJobsResponse$NextToken": "

If the response is truncated, Amazon Kendra returns this token that you can use in the subsequent request to retrieve the next set of jobs.
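The pagination contract above lends itself to a simple drain loop; the IDs here are placeholders:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kendra"
)

// listAllSyncJobs feeds each response's NextToken back into the next
// request until no token is returned.
func listAllSyncJobs(svc *kendra.Kendra) ([]*kendra.DataSourceSyncJob, error) {
	var jobs []*kendra.DataSourceSyncJob
	input := &kendra.ListDataSourceSyncJobsInput{
		Id:      aws.String("data-source-id-example"),
		IndexId: aws.String("index-id-example"),
	}
	for {
		out, err := svc.ListDataSourceSyncJobs(input)
		if err != nil {
			return nil, err
		}
		jobs = append(jobs, out.History...)
		if out.NextToken == nil {
			return jobs, nil
		}
		input.NextToken = out.NextToken
	}
}
```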

", "ListDataSourcesRequest$NextToken": "

If the previous response was incomplete (because there is more data to retrieve), Amazon Kendra returns a pagination token in the response. You can use this pagination token to retrieve the next set of data sources (DataSourceSummaryItems).

", "ListDataSourcesResponse$NextToken": "

If the response is truncated, Amazon Kendra returns this token that you can use in the subsequent request to retrieve the next set of data sources.

", - "ListFaqsRequest$NextToken": "

If the result of the previous request to ListFaqs was truncated, include the NextToken to fetch the next set of FAQs.

", - "ListFaqsResponse$NextToken": "

The ListFaqs operation returns a page of FAQs at a time. The maximum size of the page is set by the MaxResults parameter. If there are more jobs in the list than the page size, Amazon Kendra returns the NextPage token. Include the token in the next request to the ListFaqs operation to return the next page of FAQs.

", - "ListGroupsOlderThanOrderingIdRequest$NextToken": "

The next items in the list of groups that go beyond the maximum.

", - "ListGroupsOlderThanOrderingIdResponse$NextToken": "

The next items in the list of groups that go beyond the maximum.

", + "ListFaqsRequest$NextToken": "

If the previous response was incomplete (because there is more data to retrieve), Amazon Kendra returns a pagination token in the response. You can use this pagination token to retrieve the next set of FAQs.

", + "ListFaqsResponse$NextToken": "

If the response is truncated, Amazon Kendra returns this token that you can use in the subsequent request to retrieve the next set of FAQs.

", + "ListGroupsOlderThanOrderingIdRequest$NextToken": "

If the previous response was incomplete (because there is more data to retrieve), Amazon Kendra returns a pagination token in the response. You can use this pagination token to retrieve the next set of groups that are mapped to users before a given ordering or timestamp identifier.

", + "ListGroupsOlderThanOrderingIdResponse$NextToken": "

If the response is truncated, Amazon Kendra returns this token that you can use in the subsequent request to retrieve the next set of groups that are mapped to users before a given ordering or timestamp identifier.

", "ListIndicesRequest$NextToken": "

If the previous response was incomplete (because there is more data to retrieve), Amazon Kendra returns a pagination token in the response. You can use this pagination token to retrieve the next set of indexes (DataSourceSummaryItems).

", "ListIndicesResponse$NextToken": "

If the response is truncated, Amazon Kendra returns this token that you can use in the subsequent request to retrieve the next set of indexes.

", "ListQuerySuggestionsBlockListsRequest$NextToken": "

If the previous response was incomplete (because there is more data to retrieve), Amazon Kendra returns a pagination token in the response. You can use this pagination token to retrieve the next set of block lists (BlockListSummaryItems).

", @@ -1996,7 +1996,7 @@ "DescribeQuerySuggestionsBlockListResponse$SourceS3Path": "

Shows the current S3 path to your block list text file in your S3 bucket.

Each block word or phrase should be on a separate line in a text file.

For information on the current quota limits for block lists, see Quotas for Amazon Kendra.

", "DescribeThesaurusResponse$SourceS3Path": null, "Document$S3Path": null, - "GroupMembers$S3PathforGroupMembers": "

If you have more than 1000 users and/or sub groups for a single group, you need to provide the path to the S3 file that lists your users and sub groups for a group. Your sub groups can contain more than 1000 users, but the list of sub groups that belong to a group (and/or users) must be no more than 1000.

", + "GroupMembers$S3PathforGroupMembers": "

If you have more than 1000 users and/or sub groups for a single group, you need to provide the path to the S3 file that lists your users and sub groups for a group. Your sub groups can contain more than 1000 users, but the list of sub groups that belong to a group (and/or users) must be no more than 1000.

You can download this example S3 file that uses the correct format for listing group members. Note, dataSourceId is optional. The value of type for a group is always GROUP and for a user it is always USER.

", "OneDriveUsers$OneDriveUserS3Path": "

The S3 bucket location of a file containing a list of users whose documents should be indexed.

", "SharePointConfiguration$SslCertificateS3Path": null, "UpdateQuerySuggestionsBlockListRequest$SourceS3Path": "

The S3 path where your block list text file sits in S3.

If you update your block list and provide the same path to the block list text file in S3, then Amazon Kendra reloads the file to refresh the block list. Amazon Kendra does not automatically refresh your block list. You need to call the UpdateQuerySuggestionsBlockList API to refresh your block list.

If you update your block list, then Amazon Kendra asynchronously refreshes all query suggestions with the latest content in the S3 file. This means changes might not take effect immediately.
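A sketch of triggering that refresh by re-submitting the same S3 location (bucket, key, and IDs are placeholders):

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kendra"
)

// refreshBlockList re-points the block list at the same S3 object, which
// makes Amazon Kendra reload the file.
func refreshBlockList(svc *kendra.Kendra) error {
	_, err := svc.UpdateQuerySuggestionsBlockList(&kendra.UpdateQuerySuggestionsBlockListInput{
		IndexId: aws.String("index-id-example"),
		Id:      aws.String("block-list-id-example"),
		SourceS3Path: &kendra.S3Path{
			Bucket: aws.String("example-bucket"),
			Key:    aws.String("blocklists/words.txt"),
		},
	})
	return err
}
```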

", @@ -2474,7 +2474,7 @@ } }, "ThesaurusSummary": { - "base": "

An array of summary information for one or more thesauruses.

", + "base": "

An array of summary information for a thesaurus or multiple thesauri.

", "refs": { "ThesaurusSummaryItems$member": null } @@ -2482,7 +2482,7 @@ "ThesaurusSummaryItems": { "base": null, "refs": { - "ListThesauriResponse$ThesaurusSummaryItems": "

An array of summary information for one or more thesauruses.

" + "ListThesauriResponse$ThesaurusSummaryItems": "

An array of summary information for a thesaurus or multiple thesauri.

" } }, "ThrottlingException": { @@ -2510,7 +2510,7 @@ "DescribeFaqResponse$UpdatedAt": "

The date and time that the FAQ was last updated.

", "DescribeIndexResponse$CreatedAt": "

The Unix datetime that the index was created.

", "DescribeIndexResponse$UpdatedAt": "

The Unix datetime that the index was last updated.

", - "DescribeQuerySuggestionsBlockListResponse$CreatedAt": "

Shows the date-time a block list for query suggestions was last created.

", + "DescribeQuerySuggestionsBlockListResponse$CreatedAt": "

Shows the date-time a block list for query suggestions was created.

", "DescribeQuerySuggestionsBlockListResponse$UpdatedAt": "

Shows the date-time a block list for query suggestions was last updated.

", "DescribeQuerySuggestionsConfigResponse$LastSuggestionsBuildTime": "

Shows the date-time that query suggestions for an index were last updated.

", "DescribeQuerySuggestionsConfigResponse$LastClearTime": "

Shows the date-time that query suggestions for an index were last cleared.

After you clear suggestions, Amazon Kendra learns new suggestions based on new queries added to the query log from the time you cleared suggestions. Amazon Kendra only considers re-occurrences of a query from the time you cleared suggestions.

", @@ -2589,9 +2589,9 @@ } }, "Urls": { - "base": "

Provides the configuration information of the URLs to crawl.

When selecting websites to index, you must adhere to the Amazon Acceptable Use Policy and all other Amazon terms. Remember that you must only use the Amazon Kendra web crawler to index your own webpages, or webpages that you have authorization to index.

", + "base": "

Provides the configuration information of the URLs to crawl.

You can only crawl websites that use the secure communication protocol, Hypertext Transfer Protocol Secure (HTTPS). If you receive an error when crawling a website, it could be that the website is blocked from crawling.

When selecting websites to index, you must adhere to the Amazon Acceptable Use Policy and all other Amazon terms. Remember that you must only use the Amazon Kendra web crawler to index your own webpages, or webpages that you have authorization to index.

", "refs": { - "WebCrawlerConfiguration$Urls": "

Specifies the seed or starting point URLs of the websites or the sitemap URLs of the websites you want to crawl.

You can include website subdomains. You can list up to 100 seed URLs and up to three sitemap URLs.

When selecting websites to index, you must adhere to the Amazon Acceptable Use Policy and all other Amazon terms. Remember that you must only use the Amazon Kendra web crawler to index your own webpages, or webpages that you have authorization to index.

" + "WebCrawlerConfiguration$Urls": "

Specifies the seed or starting point URLs of the websites or the sitemap URLs of the websites you want to crawl.

You can include website subdomains. You can list up to 100 seed URLs and up to three sitemap URLs.

You can only crawl websites that use the secure communication protocol, Hypertext Transfer Protocol Secure (HTTPS). If you receive an error when crawling a website, it could be that the website is blocked from crawling.

When selecting websites to index, you must adhere to the Amazon Acceptable Use Policy and all other Amazon terms. Remember that you must only use the Amazon Kendra web crawler to index your own webpages, or webpages that you have authorization to index.
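An illustrative Urls value using seed URLs (the HTTPS addresses are placeholders for sites you are authorized to index; SiteMapsConfiguration is the alternative for sitemap-driven crawls):

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kendra"
)

// crawlerURLs builds the Urls shape from two seed URLs, crawling only
// the hosts they name.
func crawlerURLs() *kendra.Urls {
	return &kendra.Urls{
		SeedUrlConfiguration: &kendra.SeedUrlConfiguration{
			SeedUrls: []*string{
				aws.String("https://docs.example.com"),
				aws.String("https://blog.example.com"),
			},
			WebCrawlerMode: aws.String(kendra.WebCrawlerModeHostOnly),
		},
	}
}
```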

" } }, "UserAccount": { @@ -2601,17 +2601,31 @@ } }, "UserContext": { - "base": "

Provides information about the user context for a Amazon Kendra index.

This is used for filtering search results for different users based on their access to documents.

You provide one of the following:

If you provide both, an exception is thrown.

", + "base": "

Provides information about the user context for an Amazon Kendra index.

This is used for filtering search results for different users based on their access to documents.

You provide one of the following:

If you provide both, an exception is thrown.

", "refs": { - "QueryRequest$UserContext": "

The user context token.

" + "QueryRequest$UserContext": "

The user context token or user and group information.

" } }, "UserContextPolicy": { "base": null, "refs": { - "CreateIndexRequest$UserContextPolicy": "

The user context policy.

ATTRIBUTE_FILTER

All indexed content is searchable and displayable for all users. If there is an access control list, it is ignored. You can filter on user and group attributes.

USER_TOKEN

Enables SSO and token-based user access control. All documents with no access control and all documents accessible to the user will be searchable and displayable.

", + "CreateIndexRequest$UserContextPolicy": "

The user context policy.

ATTRIBUTE_FILTER

All indexed content is searchable and displayable for all users. If you want to filter search results on user context, you can use the attribute filters of _user_id and _group_ids or you can provide user and group information in UserContext.

USER_TOKEN

Enables token-based user access control to filter search results on user context. All documents with no access control and all documents accessible to the user will be searchable and displayable.

", "DescribeIndexResponse$UserContextPolicy": "

The user context policy for the Amazon Kendra index.

", - "UpdateIndexRequest$UserContextPolicy": "

The user user token context policy.

" + "UpdateIndexRequest$UserContextPolicy": "

The user context policy.

" + } + }, + "UserGroupResolutionConfiguration": { + "base": "

Provides the configuration information to fetch access levels of groups and users from an AWS Single Sign-On identity source. This is useful for setting up user context filtering, where Amazon Kendra filters search results for different users based on their group's access to documents. You can also map your users to their groups for user context filtering using the PutPrincipalMapping operation.

To set up an AWS SSO identity source in the console to use with Amazon Kendra, see Getting started with an AWS SSO identity source. You must also grant the required permissions to use AWS SSO with Amazon Kendra. For more information, see IAM roles for AWS Single Sign-On.

", + "refs": { + "CreateIndexRequest$UserGroupResolutionConfiguration": "

Enables fetching access levels of groups and users from an AWS Single Sign-On identity source. To configure this, see UserGroupResolutionConfiguration.
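A hedged sketch of enabling the AWS SSO mode at index creation (the name and role ARN are placeholders, and the role must carry the permissions noted above):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kendra"
)

func main() {
	svc := kendra.New(session.Must(session.NewSession()))

	out, err := svc.CreateIndex(&kendra.CreateIndexInput{
		Name:    aws.String("sso-backed-index"),
		RoleArn: aws.String("arn:aws:iam::123456789012:role/ExampleKendraRole"),
		// Fetch group and user access levels from the AWS SSO identity source.
		UserGroupResolutionConfiguration: &kendra.UserGroupResolutionConfiguration{
			UserGroupResolutionMode: aws.String(kendra.UserGroupResolutionModeAwsSso),
		},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(aws.StringValue(out.Id))
}
```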

", + "DescribeIndexResponse$UserGroupResolutionConfiguration": "

Shows whether you have enabled the configuration for fetching access levels of groups and users from an AWS Single Sign-On identity source.

", + "UpdateIndexRequest$UserGroupResolutionConfiguration": "

Enables fetching access levels of groups and users from an AWS Single Sign-On identity source. To configure this, see UserGroupResolutionConfiguration.

" + } + }, + "UserGroupResolutionMode": { + "base": null, + "refs": { + "UserGroupResolutionConfiguration$UserGroupResolutionMode": "

The identity store provider (mode) you want to use to fetch access levels of groups and users. AWS Single Sign-On is currently the only available mode. Your users and groups must exist in an AWS SSO identity source in order to use this mode.

" } }, "UserId": { diff --git a/models/apis/sagemaker/2017-07-24/api-2.json b/models/apis/sagemaker/2017-07-24/api-2.json index e2138151456..024d6f152ef 100644 --- a/models/apis/sagemaker/2017-07-24/api-2.json +++ b/models/apis/sagemaker/2017-07-24/api-2.json @@ -14092,7 +14092,8 @@ "type":"string", "enum":[ "Pipe", - "File" + "File", + "FastFile" ] }, "TrainingInstanceCount":{ diff --git a/models/apis/sagemaker/2017-07-24/docs-2.json b/models/apis/sagemaker/2017-07-24/docs-2.json index 20ec08380d8..f351485d731 100644 --- a/models/apis/sagemaker/2017-07-24/docs-2.json +++ b/models/apis/sagemaker/2017-07-24/docs-2.json @@ -18,7 +18,7 @@ "CreateDeviceFleet": "

Creates a device fleet.

", "CreateDomain": "

Creates a Domain used by Amazon SageMaker Studio. A domain consists of an associated Amazon Elastic File System (EFS) volume, a list of authorized users, and a variety of security, application, policy, and Amazon Virtual Private Cloud (VPC) configurations. An Amazon Web Services account is limited to one domain per region. Users within a domain can share notebook files and other artifacts with each other.

EFS storage

When a domain is created, an EFS volume is created for use by all of the users within the domain. Each user receives a private home directory within the EFS volume for notebooks, Git repositories, and data files.

SageMaker uses the Amazon Web Services Key Management Service (Amazon Web Services KMS) to encrypt the EFS volume attached to the domain with an Amazon Web Services managed key by default. For more control, you can specify a customer managed key. For more information, see Protect Data at Rest Using Encryption.

VPC configuration

All SageMaker Studio traffic between the domain and the EFS volume is through the specified VPC and subnets. For other Studio traffic, you can specify the AppNetworkAccessType parameter. AppNetworkAccessType corresponds to the network access type that you choose when you onboard to Studio. The following options are available:

NFS traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules in order to launch a SageMaker Studio app successfully.

For more information, see Connect SageMaker Studio Notebooks to Resources in a VPC.

", "CreateEdgePackagingJob": "

Starts a SageMaker Edge Manager model packaging job. Edge Manager will use the model artifacts from the Amazon Simple Storage Service bucket that you specify. After the model has been packaged, Amazon SageMaker saves the resulting artifacts to an S3 bucket that you specify.

", - "CreateEndpoint": "

Creates an endpoint using the endpoint configuration specified in the request. Amazon SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.

Use this API to deploy models using Amazon SageMaker hosting services.

For an example that calls this method when deploying a model to Amazon SageMaker hosting services, see the Create Endpoint example notebook.

You must not delete an EndpointConfig that is in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

The endpoint name must be unique within an Amazon Web Services Region in your Amazon Web Services account.

When it receives the request, Amazon SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.

When you call CreateEndpoint, a load call is made to DynamoDB to verify that your endpoint configuration exists. When you read data from a DynamoDB table supporting Eventually Consistent Reads , the response might not reflect the results of a recently completed write operation. The response might include some stale data. If the dependent entities are not yet in DynamoDB, this causes a validation error. If you repeat your read request after a short time, the response should return the latest data. So retry logic is recommended to handle these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.

When Amazon SageMaker receives the request, it sets the endpoint status to Creating. After it creates the endpoint, it sets the status to InService. Amazon SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.

If any of the models hosted at this endpoint get model data from an Amazon S3 location, Amazon SageMaker uses Amazon Web Services Security Token Service to download model artifacts from the S3 path you provided. Amazon Web Services STS is activated in your IAM user account by default. If you previously deactivated Amazon Web Services STS for a region, you need to reactivate Amazon Web Services STS for that region. For more information, see Activating and Deactivating Amazon Web Services STS in an Amazon Web Services Region in the Amazon Web Services Identity and Access Management User Guide.

To add the IAM role policies for using this API operation, go to the IAM console, and choose Roles in the left navigation pane. Search the IAM role that you want to grant access to use the CreateEndpoint and CreateEndpointConfig API operations, add the following policies to the role.

", + "CreateEndpoint": "

Creates an endpoint using the endpoint configuration specified in the request. Amazon SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.

Use this API to deploy models using Amazon SageMaker hosting services.

For an example that calls this method when deploying a model to Amazon SageMaker hosting services, see the Create Endpoint example notebook.

You must not delete an EndpointConfig that is in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

The endpoint name must be unique within an Amazon Web Services Region in your Amazon Web Services account.

When it receives the request, Amazon SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.

When you call CreateEndpoint, a load call is made to DynamoDB to verify that your endpoint configuration exists. When you read data from a DynamoDB table supporting Eventually Consistent Reads , the response might not reflect the results of a recently completed write operation. The response might include some stale data. If the dependent entities are not yet in DynamoDB, this causes a validation error. If you repeat your read request after a short time, the response should return the latest data. So retry logic is recommended to handle these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.

When Amazon SageMaker receives the request, it sets the endpoint status to Creating. After it creates the endpoint, it sets the status to InService. Amazon SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.

If any of the models hosted at this endpoint get model data from an Amazon S3 location, Amazon SageMaker uses Amazon Web Services Security Token Service to download model artifacts from the S3 path you provided. Amazon Web Services STS is activated in your IAM user account by default. If you previously deactivated Amazon Web Services STS for a region, you need to reactivate Amazon Web Services STS for that region. For more information, see Activating and Deactivating Amazon Web Services STS in an Amazon Web Services Region in the Amazon Web Services Identity and Access Management User Guide.

To add the IAM role policies for using this API operation, go to the IAM console, and choose Roles in the left navigation pane. Search the IAM role that you want to grant access to use the CreateEndpoint and CreateEndpointConfig API operations, add the following policies to the role.

", "CreateEndpointConfig": "

Creates an endpoint configuration that Amazon SageMaker hosting services uses to deploy models. In the configuration, you identify one or more models, created using the CreateModel API, to deploy and the resources that you want Amazon SageMaker to provision. Then you call the CreateEndpoint API.

Use this API if you want to use Amazon SageMaker hosting services to deploy models into production.

In the request, you define a ProductionVariant for each model that you want to deploy. Each ProductionVariant parameter also describes the resources that you want Amazon SageMaker to provision. This includes the number and type of ML compute instances to deploy.

If you are hosting multiple models, you also assign a VariantWeight to specify how much traffic you want to allocate to each model. For example, suppose that you want to host two models, A and B, and you assign traffic weight 2 for model A and 1 for model B. Amazon SageMaker distributes two-thirds of the traffic to Model A, and one-third to model B.

When you call CreateEndpoint, a load call is made to DynamoDB to verify that your endpoint configuration exists. When you read data from a DynamoDB table supporting Eventually Consistent Reads , the response might not reflect the results of a recently completed write operation. The response might include some stale data. If the dependent entities are not yet in DynamoDB, this causes a validation error. If you repeat your read request after a short time, the response should return the latest data. So retry logic is recommended to handle these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.

", "CreateExperiment": "

Creates a SageMaker experiment. An experiment is a collection of trials that are observed, compared and evaluated as a group. A trial is a set of steps, called trial components, that produce a machine learning model.

The goal of an experiment is to determine the components that produce the best model. Multiple trials are performed, each one isolating and measuring the impact of a change to one or more inputs, while keeping the remaining inputs constant.

When you use SageMaker Studio or the SageMaker Python SDK, all experiments, trials, and trial components are automatically tracked, logged, and indexed. When you use the Amazon Web Services SDK for Python (Boto), you must use the logging APIs provided by the SDK.

You can add tags to experiments, trials, trial components and then use the Search API to search for the tags.

To add a description to an experiment, specify the optional Description parameter. To add a description later, or to change the description, call the UpdateExperiment API.

To get a list of all your experiments, call the ListExperiments API. To view an experiment's properties, call the DescribeExperiment API. To get a list of all the trials associated with an experiment, call the ListTrials API. To create a trial call the CreateTrial API.

", "CreateFeatureGroup": "

Create a new FeatureGroup. A FeatureGroup is a group of Features defined in the FeatureStore to describe a Record.

The FeatureGroup defines the schema and features contained in the FeatureGroup. A FeatureGroup definition is composed of a list of Features, a RecordIdentifierFeatureName, an EventTimeFeatureName and configurations for its OnlineStore and OfflineStore. Check Amazon Web Services service quotas to see the FeatureGroups quota for your Amazon Web Services account.

You must include at least one of OnlineStoreConfig and OfflineStoreConfig to create a FeatureGroup.

", @@ -1039,7 +1039,7 @@ "BillableTimeInSeconds": { "base": null, "refs": { - "DescribeTrainingJobResponse$BillableTimeInSeconds": "

The billable time in seconds. Billable time refers to the absolute wall-clock time.

Multiply BillableTimeInSeconds by the number of instances (InstanceCount) in your training cluster to get the total compute time Amazon SageMaker will bill you if you run distributed training. The formula is as follows: BillableTimeInSeconds * InstanceCount .

You can calculate the savings from using managed spot training using the formula (1 - BillableTimeInSeconds / TrainingTimeInSeconds) * 100. For example, if BillableTimeInSeconds is 100 and TrainingTimeInSeconds is 500, the savings is 80%.

", + "DescribeTrainingJobResponse$BillableTimeInSeconds": "

The billable time in seconds. Billable time refers to the absolute wall-clock time.

Multiply BillableTimeInSeconds by the number of instances (InstanceCount) in your training cluster to get the total compute time SageMaker will bill you if you run distributed training. The formula is as follows: BillableTimeInSeconds * InstanceCount .

You can calculate the savings from using managed spot training using the formula (1 - BillableTimeInSeconds / TrainingTimeInSeconds) * 100. For example, if BillableTimeInSeconds is 100 and TrainingTimeInSeconds is 500, the savings is 80%.

", "TrainingJob$BillableTimeInSeconds": "

The billable time in seconds.

" } }, @@ -4905,7 +4905,7 @@ "JoinSource": { "base": null, "refs": { - "DataProcessing$JoinSource": "

Specifies the source of the data to join with the transformed data. The valid values are None and Input. The default value is None, which specifies not to join the input with the transformed data. If you want the batch transform job to join the original input data with the transformed data, set JoinSource to Input. You can specify OutputFilter as an additional filter to select a portion of the joined dataset and store it in the output file.

For JSON or JSONLines objects, such as a JSON array, Amazon SageMaker adds the transformed data to the input JSON object in an attribute called SageMakerOutput. The joined result for JSON must be a key-value pair object. If the input is not a key-value pair object, Amazon SageMaker creates a new JSON file. In the new JSON file, and the input data is stored under the SageMakerInput key and the results are stored in SageMakerOutput.

For CSV data, Amazon SageMaker takes each row as a JSON array and joins the transformed data with the input by appending each transformed row to the end of the input. The joined data has the original input data followed by the transformed data and the output is a CSV file.

For information on how joining in applied, see Workflow for Associating Inferences with Input Records.

" + "DataProcessing$JoinSource": "

Specifies the source of the data to join with the transformed data. The valid values are None and Input. The default value is None, which specifies not to join the input with the transformed data. If you want the batch transform job to join the original input data with the transformed data, set JoinSource to Input. You can specify OutputFilter as an additional filter to select a portion of the joined dataset and store it in the output file.

For JSON or JSONLines objects, such as a JSON array, SageMaker adds the transformed data to the input JSON object in an attribute called SageMakerOutput. The joined result for JSON must be a key-value pair object. If the input is not a key-value pair object, SageMaker creates a new JSON file. In the new JSON file, the input data is stored under the SageMakerInput key and the results are stored in SageMakerOutput.

For CSV data, SageMaker takes each row as a JSON array and joins the transformed data with the input by appending each transformed row to the end of the input. The joined data has the original input data followed by the transformed data and the output is a CSV file.

For information on how joining is applied, see Workflow for Associating Inferences with Input Records.

" } }, "JsonContentType": { @@ -5851,7 +5851,7 @@ "MaxAutoMLJobRuntimeInSeconds": { "base": null, "refs": { - "AutoMLJobCompletionCriteria$MaxAutoMLJobRuntimeInSeconds": "

The maximum runtime, in seconds, an AutoML job has to complete.

" + "AutoMLJobCompletionCriteria$MaxAutoMLJobRuntimeInSeconds": "

The maximum runtime, in seconds, an AutoML job has to complete.

If an AutoML job exceeds the maximum runtime, the job is stopped automatically and its processing is ended gracefully. The AutoML job identifies the best model whose training was completed and marks it as the best-performing model. Any unfinished steps of the job, such as automatic one-click Autopilot model deployment, will not be completed.

" } }, "MaxCandidates": { @@ -5976,7 +5976,7 @@ "MaxRuntimePerTrainingJobInSeconds": { "base": null, "refs": { - "AutoMLJobCompletionCriteria$MaxRuntimePerTrainingJobInSeconds": "

The maximum time, in seconds, a training job is allowed to run as part of an AutoML job.

" + "AutoMLJobCompletionCriteria$MaxRuntimePerTrainingJobInSeconds": "

The maximum time, in seconds, that each training job is allowed to run as part of a hyperparameter tuning job. For more information, see the StoppingCondition used by the action.

" } }, "MaxWaitTimeInSeconds": { @@ -7220,7 +7220,7 @@ "OfflineStoreConfig": { "base": "

The configuration of an OfflineStore.

Provide an OfflineStoreConfig in a request to CreateFeatureGroup to create an OfflineStore.

To encrypt an OfflineStore using at rest data encryption, specify Amazon Web Services Key Management Service (KMS) key ID, or KMSKeyId, in S3StorageConfig.

", "refs": { - "CreateFeatureGroupRequest$OfflineStoreConfig": "

Use this to configure an OfflineFeatureStore. This parameter allows you to specify:

To learn more about this parameter, see OfflineStoreConfig.

", + "CreateFeatureGroupRequest$OfflineStoreConfig": "

Use this to configure an OfflineFeatureStore. This parameter allows you to specify:

* The Amazon Simple Storage Service (Amazon S3) location of an OfflineStore.

* A configuration for an Amazon Web Services Glue or Amazon Web Services Hive data catalog.

* A KMS encryption key to encrypt the Amazon S3 location used for OfflineStore.

To learn more about this parameter, see OfflineStoreConfig.

", "DescribeFeatureGroupResponse$OfflineStoreConfig": "

The configuration of the OfflineStore, including the S3 location of the OfflineStore, Amazon Web Services Glue or Amazon Web Services Hive data catalogue configurations, and the security configuration.

", "FeatureGroup$OfflineStoreConfig": null } @@ -7876,7 +7876,7 @@ "ProcessingS3InputMode": { "base": null, "refs": { - "EndpointInput$S3InputMode": "

Whether the Pipe or File is used as the input mode for transfering data for the monitoring job. Pipe mode is recommended for large datasets. File mode is useful for small files that fit in memory. Defaults to File.

", + "EndpointInput$S3InputMode": "

Whether Pipe or File mode is used as the input mode for transferring data for the monitoring job. Pipe mode is recommended for large datasets. File mode is useful for small files that fit in memory. Defaults to File.

", "ProcessingS3Input$S3InputMode": "

Whether to use File or Pipe input mode. In File mode, Amazon SageMaker copies the data from the input source onto the local ML storage volume before starting your processing container. This is the most commonly used input mode. In Pipe mode, Amazon SageMaker streams input data from the source directly to your processing container into named pipes without using the ML storage volume.

" } }, @@ -9019,7 +9019,7 @@ } }, "StoppingCondition": { - "base": "

Specifies a limit to how long a model training job, model compilation job, or hyperparameter tuning job can run. It also specifies how long a managed Spot training job has to complete. When the job reaches the time limit, Amazon SageMaker ends the training or compilation job. Use this API to cap model training costs.

To stop a training job, Amazon SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.

The training algorithms provided by Amazon SageMaker automatically save the intermediate results of a model training job when possible. This attempt to save artifacts is only a best effort case as model might not be in a state from which it can be saved. For example, if training has just started, the model might not be ready to save. When saved, this intermediate data is a valid model artifact. You can use it to create a model with CreateModel.

The Neural Topic Model (NTM) currently does not support saving intermediate model artifacts. When training NTMs, make sure that the maximum runtime is sufficient for the training job to complete.

", + "base": "

Specifies a limit to how long a model training job or model compilation job can run. It also specifies how long a managed spot training job has to complete. When the job reaches the time limit, Amazon SageMaker ends the training or compilation job. Use this API to cap model training costs.

To stop a training job, Amazon SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.

The training algorithms provided by Amazon SageMaker automatically save the intermediate results of a model training job when possible. This attempt to save artifacts is best effort only, as the model might not be in a state from which it can be saved. For example, if training has just started, the model might not be ready to save. When saved, this intermediate data is a valid model artifact. You can use it to create a model with CreateModel.

The Neural Topic Model (NTM) currently does not support saving intermediate model artifacts. When training NTMs, make sure that the maximum runtime is sufficient for the training job to complete.
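
For reference, a sketch of a stopping condition for a managed spot training job with this SDK; the limits are arbitrary examples.

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/sagemaker"
    )

    // spotStoppingCondition lets the job run for at most one hour and wait
    // (run time plus spot interruptions) for at most two hours in total.
    func spotStoppingCondition() *sagemaker.StoppingCondition {
        return &sagemaker.StoppingCondition{
            MaxRuntimeInSeconds:  aws.Int64(3600),
            MaxWaitTimeInSeconds: aws.Int64(7200), // must be >= MaxRuntimeInSeconds
        }
    }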

", "refs": { "CreateCompilationJobRequest$StoppingCondition": "

Specifies a limit to how long a model compilation job can run. When the job reaches the time limit, Amazon SageMaker ends the compilation job. Use this API to cap model training costs.

", "CreateTrainingJobRequest$StoppingCondition": "

Specifies a limit to how long a model training job can run. It also specifies how long a managed Spot training job has to complete. When the job reaches the time limit, Amazon SageMaker ends the training job. Use this API to cap model training costs.

To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.

", @@ -9239,7 +9239,7 @@ } }, "Tag": { - "base": "

A tag object that consists of a key and an optional value, used to manage metadata for Amazon SageMaker Amazon Web Services resources.

You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to Amazon SageMaker resources, see AddTags.

For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources. For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy.

", + "base": "

A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources.

You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags.

For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources. For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy.
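
A short sketch of attaching such a tag with AddTags; the resource ARN and tag values are placeholders.

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/sagemaker"
    )

    // tagResource adds a cost-center tag to a SageMaker endpoint.
    func tagResource(svc *sagemaker.SageMaker) error {
        _, err := svc.AddTags(&sagemaker.AddTagsInput{
            ResourceArn: aws.String("arn:aws:sagemaker:us-east-1:111122223333:endpoint/example"), // placeholder
            Tags: []*sagemaker.Tag{
                {Key: aws.String("cost-center"), Value: aws.String("search-team")},
            },
        })
        return err
    }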

", "refs": { "TagList$member": null } @@ -9504,7 +9504,7 @@ "DescribeAppImageConfigResponse$CreationTime": "

When the AppImageConfig was created.

", "DescribeAppImageConfigResponse$LastModifiedTime": "

When the AppImageConfig was last modified.

", "DescribeAppResponse$LastHealthCheckTimestamp": "

The timestamp of the last health check.

", - "DescribeAppResponse$LastUserActivityTimestamp": "

The timestamp of the last user's activity.

", + "DescribeAppResponse$LastUserActivityTimestamp": "

The timestamp of the last user's activity. LastUserActivityTimestamp is also updated when SageMaker performs health checks without user activity. As a result, this value is set to the same value as LastHealthCheckTimestamp.

", "DescribeArtifactResponse$CreationTime": "

When the artifact was created.

", "DescribeArtifactResponse$LastModifiedTime": "

When the artifact was last modified.

", "DescribeAutoMLJobResponse$CreationTime": "

Returns the creation time of the AutoML job.

", @@ -9811,13 +9811,13 @@ } }, "TrainingInputMode": { - "base": null, + "base": "

The training input mode that the algorithm supports. For more information about input modes, see Algorithms.

Pipe mode

If an algorithm supports Pipe mode, Amazon SageMaker streams data directly from Amazon S3 to the container.

File mode

If an algorithm supports File mode, SageMaker downloads the training data from S3 to the provisioned ML storage volume, and mounts the directory to the Docker volume for the training container.

You must provision the ML storage volume with sufficient capacity to accommodate the data downloaded from S3. In addition to the training data, the ML storage volume also stores the output model. The algorithm container uses the ML storage volume to also store intermediate information, if any.

For distributed algorithms, training data is distributed uniformly. Your training duration is predictable if the input data object sizes are approximately the same. SageMaker does not split the files any further for model training. If the object sizes are skewed, training won't be optimal, as the data distribution is also skewed: one host in the training cluster can be overloaded, becoming a bottleneck in training.

FastFile mode

If an algorithm supports FastFile mode, SageMaker streams data directly from S3 to the container with no code changes, and provides file system access to the data. Users can author their training script to interact with these files as if they were stored on disk.

FastFile mode works best when the data is read sequentially. Augmented manifest files aren't supported. The startup time is lower when there are fewer files in the S3 bucket provided.
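
A hedged sketch of requesting the new mode in an AlgorithmSpecification, assuming the TrainingInputModeFastFile constant this release introduces; the training image URI is a placeholder.

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/sagemaker"
    )

    // fastFileSpec asks SageMaker to stream training data with FastFile mode.
    func fastFileSpec() *sagemaker.AlgorithmSpecification {
        return &sagemaker.AlgorithmSpecification{
            TrainingImage:     aws.String("111122223333.dkr.ecr.us-east-1.amazonaws.com/example:latest"), // placeholder
            TrainingInputMode: aws.String(sagemaker.TrainingInputModeFastFile),
        }
    }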

", "refs": { - "AlgorithmSpecification$TrainingInputMode": "

The input mode that the algorithm supports. For the input modes that Amazon SageMaker algorithms support, see Algorithms. If an algorithm supports the File input mode, Amazon SageMaker downloads the training data from S3 to the provisioned ML storage Volume, and mounts the directory to docker volume for training container. If an algorithm supports the Pipe input mode, Amazon SageMaker streams data directly from S3 to the container.

In File mode, make sure you provision ML storage volume with sufficient capacity to accommodate the data download from S3. In addition to the training data, the ML storage volume also stores the output model. The algorithm container use ML storage volume to also store intermediate information, if any.

For distributed algorithms using File mode, training data is distributed uniformly, and your training duration is predictable if the input data objects size is approximately same. Amazon SageMaker does not split the files any further for model training. If the object sizes are skewed, training won't be optimal as the data distribution is also skewed where one host in a training cluster is overloaded, thus becoming bottleneck in training.

", + "AlgorithmSpecification$TrainingInputMode": null, "Channel$InputMode": "

(Optional) The input mode to use for the data channel in a training job. If you don't set a value for InputMode, Amazon SageMaker uses the value set for TrainingInputMode. Use this parameter to override the TrainingInputMode setting in an AlgorithmSpecification request when you have a channel that needs a different input mode from the training job's general setting. To download the data from Amazon Simple Storage Service (Amazon S3) to the provisioned ML storage volume, and mount the directory to a Docker volume, use File input mode. To stream data directly from Amazon S3 to the container, choose Pipe input mode.

To use a model for incremental training, choose File input mode.
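
A minimal sketch of such a per-channel override with this SDK; the channel name and S3 URI are placeholder assumptions.

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/sagemaker"
    )

    // pipeChannel overrides the job-level TrainingInputMode for one channel,
    // streaming this channel's data instead of downloading it to the volume.
    func pipeChannel() *sagemaker.Channel {
        return &sagemaker.Channel{
            ChannelName: aws.String("train"), // placeholder
            InputMode:   aws.String(sagemaker.TrainingInputModePipe),
            DataSource: &sagemaker.DataSource{
                S3DataSource: &sagemaker.S3DataSource{
                    S3DataType: aws.String(sagemaker.S3DataTypeS3Prefix),
                    S3Uri:      aws.String("s3://example-bucket/train/"), // placeholder
                },
            },
        }
    }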

", - "HyperParameterAlgorithmSpecification$TrainingInputMode": "

The input mode that the algorithm supports: File or Pipe. In File input mode, Amazon SageMaker downloads the training data from Amazon S3 to the storage volume that is attached to the training instance and mounts the directory to the Docker volume for the training container. In Pipe input mode, Amazon SageMaker streams data directly from Amazon S3 to the container.

If you specify File mode, make sure that you provision the storage volume that is attached to the training instance with enough capacity to accommodate the training data downloaded from Amazon S3, the model artifacts, and intermediate information.

For more information about input modes, see Algorithms.

", + "HyperParameterAlgorithmSpecification$TrainingInputMode": null, "InputModes$member": null, - "TrainingJobDefinition$TrainingInputMode": "

The input mode used by the algorithm for the training job. For the input modes that Amazon SageMaker algorithms support, see Algorithms.

If an algorithm supports the File input mode, Amazon SageMaker downloads the training data from S3 to the provisioned ML storage Volume, and mounts the directory to docker volume for training container. If an algorithm supports the Pipe input mode, Amazon SageMaker streams data directly from S3 to the container.

" + "TrainingJobDefinition$TrainingInputMode": null } }, "TrainingInstanceCount": { diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index 85ebfc99496..b59007d3a36 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -1982,6 +1982,29 @@ "us-west-2" : { } } }, + "databrew" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, "dataexchange" : { "endpoints" : { "ap-northeast-1" : { }, @@ -4107,6 +4130,35 @@ "us-west-2" : { } } }, + "kendra" : { + "endpoints" : { + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-west-1" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "kendra-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "kendra-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "kendra-fips.us-west-2.amazonaws.com" + }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, "kinesis" : { "endpoints" : { "af-south-1" : { }, @@ -5145,6 +5197,12 @@ }, "hostname" : "oidc.eu-west-3.amazonaws.com" }, + "sa-east-1" : { + "credentialScope" : { + "region" : "sa-east-1" + }, + "hostname" : "oidc.sa-east-1.amazonaws.com" + }, "us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -8127,6 +8185,12 @@ "cn-northwest-1" : { } } }, + "databrew" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "dax" : { "endpoints" : { "cn-north-1" : { }, @@ -9287,6 +9351,11 @@ "us-gov-west-1" : { } } }, + "databrew" : { + "endpoints" : { + "us-gov-west-1" : { } + } + }, "datasync" : { "endpoints" : { "fips-us-gov-east-1" : { diff --git a/service/amplifybackend/api.go b/service/amplifybackend/api.go index bdb842b4b61..62df71833c7 100644 --- a/service/amplifybackend/api.go +++ b/service/amplifybackend/api.go @@ -5603,6 +5603,8 @@ func (s *GetBackendJobOutput) SetUpdateTime(v string) *GetBackendJobOutput { type GetBackendOutput struct { _ struct{} `type:"structure"` + AmplifyFeatureFlags *string `locationName:"amplifyFeatureFlags" type:"string"` + AmplifyMetaConfig *string `locationName:"amplifyMetaConfig" type:"string"` AppId *string `locationName:"appId" type:"string"` @@ -5634,6 +5636,12 @@ func (s GetBackendOutput) GoString() string { return s.String() } +// SetAmplifyFeatureFlags sets the AmplifyFeatureFlags field's value. +func (s *GetBackendOutput) SetAmplifyFeatureFlags(v string) *GetBackendOutput { + s.AmplifyFeatureFlags = &v + return s +} + // SetAmplifyMetaConfig sets the AmplifyMetaConfig field's value. func (s *GetBackendOutput) SetAmplifyMetaConfig(v string) *GetBackendOutput { s.AmplifyMetaConfig = &v diff --git a/service/fsx/api.go b/service/fsx/api.go index afcd00c4c62..a02c51ad492 100644 --- a/service/fsx/api.go +++ b/service/fsx/api.go @@ -5049,6 +5049,15 @@ type CreateFileSystemFromBackupInput struct { // the Command Line Interface (CLI) or an Amazon Web Services SDK. 
ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` + // Sets the version for the Amazon FSx for Lustre file system you're creating + // from a backup. Valid values are 2.10 and 2.12. + // + // You don't need to specify FileSystemTypeVersion because it will be applied + // using the backup's FileSystemTypeVersion setting. If you choose to specify + // FileSystemTypeVersion when creating from backup, the value must match the + // backup's FileSystemTypeVersion setting. + FileSystemTypeVersion *string `min:"1" type:"string"` + // The ID of the Key Management Service (KMS) key used to encrypt the file system's // data for Amazon FSx for Windows File Server file systems, Amazon FSx for // NetApp ONTAP file systems, and Amazon FSx for Lustre PERSISTENT_1 file systems @@ -5137,6 +5146,9 @@ func (s *CreateFileSystemFromBackupInput) Validate() error { if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) } + if s.FileSystemTypeVersion != nil && len(*s.FileSystemTypeVersion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FileSystemTypeVersion", 1)) + } if s.KmsKeyId != nil && len(*s.KmsKeyId) < 1 { invalidParams.Add(request.NewErrParamMinLen("KmsKeyId", 1)) } @@ -5185,6 +5197,12 @@ func (s *CreateFileSystemFromBackupInput) SetClientRequestToken(v string) *Creat return s } +// SetFileSystemTypeVersion sets the FileSystemTypeVersion field's value. +func (s *CreateFileSystemFromBackupInput) SetFileSystemTypeVersion(v string) *CreateFileSystemFromBackupInput { + s.FileSystemTypeVersion = &v + return s +} + // SetKmsKeyId sets the KmsKeyId field's value. func (s *CreateFileSystemFromBackupInput) SetKmsKeyId(v string) *CreateFileSystemFromBackupInput { s.KmsKeyId = &v @@ -5274,6 +5292,16 @@ type CreateFileSystemInput struct { // FileSystemType is a required field FileSystemType *string `type:"string" required:"true" enum:"FileSystemType"` + // Sets the version of the Amazon FSx for Lustre file system you're creating. + // Valid values are 2.10 and 2.12. + // + // * Set the value to 2.10 to create a Lustre 2.10 file system. + // + // * Set the value to 2.12 to create a Lustre 2.12 file system. + // + // Default value is 2.10. + FileSystemTypeVersion *string `min:"1" type:"string"` + // The ID of the Key Management Service (KMS) key used to encrypt the file system's // data for Amazon FSx for Windows File Server file systems, Amazon FSx for // NetApp ONTAP file systems, and Amazon FSx for Lustre PERSISTENT_1 file systems @@ -5391,6 +5419,9 @@ func (s *CreateFileSystemInput) Validate() error { if s.FileSystemType == nil { invalidParams.Add(request.NewErrParamRequired("FileSystemType")) } + if s.FileSystemTypeVersion != nil && len(*s.FileSystemTypeVersion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FileSystemTypeVersion", 1)) + } if s.KmsKeyId != nil && len(*s.KmsKeyId) < 1 { invalidParams.Add(request.NewErrParamMinLen("KmsKeyId", 1)) } @@ -5447,6 +5478,12 @@ func (s *CreateFileSystemInput) SetFileSystemType(v string) *CreateFileSystemInp return s } +// SetFileSystemTypeVersion sets the FileSystemTypeVersion field's value. +func (s *CreateFileSystemInput) SetFileSystemTypeVersion(v string) *CreateFileSystemInput { + s.FileSystemTypeVersion = &v + return s +} + // SetKmsKeyId sets the KmsKeyId field's value. 
func (s *CreateFileSystemInput) SetKmsKeyId(v string) *CreateFileSystemInput { s.KmsKeyId = &v @@ -6055,11 +6092,10 @@ type CreateFileSystemWindowsConfiguration struct { // data transfer costs and minimize latency. PreferredSubnetId *string `min:"15" type:"string"` - // The configuration that Amazon FSx uses to join a Amazon FSx for Windows File - // Server file system or an ONTAP storage virtual machine (SVM) to a self-managed - // (including on-premises) Microsoft Active Directory (AD) directory. For more - // information, see Using Amazon FSx with your self-managed Microsoft Active - // Directory (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/self-managed-AD.html) + // The configuration that Amazon FSx uses to join a FSx for Windows File Server + // file system or an ONTAP storage virtual machine (SVM) to a self-managed (including + // on-premises) Microsoft Active Directory (AD) directory. For more information, + // see Using Amazon FSx with your self-managed Microsoft Active Directory (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/self-managed-AD.html) // or Managing SVMs (https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/managing-svms.html). SelfManagedActiveDirectoryConfiguration *SelfManagedActiveDirectoryConfiguration `type:"structure"` @@ -6533,11 +6569,10 @@ type CreateSvmActiveDirectoryConfiguration struct { // NetBiosName is a required field NetBiosName *string `min:"1" type:"string" required:"true"` - // The configuration that Amazon FSx uses to join a Amazon FSx for Windows File - // Server file system or an ONTAP storage virtual machine (SVM) to a self-managed - // (including on-premises) Microsoft Active Directory (AD) directory. For more - // information, see Using Amazon FSx with your self-managed Microsoft Active - // Directory (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/self-managed-AD.html) + // The configuration that Amazon FSx uses to join a FSx for Windows File Server + // file system or an ONTAP storage virtual machine (SVM) to a self-managed (including + // on-premises) Microsoft Active Directory (AD) directory. For more information, + // see Using Amazon FSx with your self-managed Microsoft Active Directory (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/self-managed-AD.html) // or Managing SVMs (https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/managing-svms.html). SelfManagedActiveDirectoryConfiguration *SelfManagedActiveDirectoryConfiguration `type:"structure"` } @@ -9323,6 +9358,9 @@ type FileSystem struct { // The type of Amazon FSx file system, which can be LUSTRE, WINDOWS, or ONTAP. FileSystemType *string `type:"string" enum:"FileSystemType"` + // The version of your Amazon FSx for Lustre file system, either 2.10 or 2.12. + FileSystemTypeVersion *string `min:"1" type:"string"` + // The ID of the Key Management Service (KMS) key used to encrypt the file system's // data for Amazon FSx for Windows File Server file systems, Amazon FSx for // NetApp ONTAP file systems, and persistent Amazon FSx for Lustre file systems @@ -9464,6 +9502,12 @@ func (s *FileSystem) SetFileSystemType(v string) *FileSystem { return s } +// SetFileSystemTypeVersion sets the FileSystemTypeVersion field's value. +func (s *FileSystem) SetFileSystemTypeVersion(v string) *FileSystem { + s.FileSystemTypeVersion = &v + return s +} + // SetKmsKeyId sets the KmsKeyId field's value. 
func (s *FileSystem) SetKmsKeyId(v string) *FileSystem { s.KmsKeyId = &v @@ -11375,7 +11419,7 @@ func (s *ResourceNotFound) RequestID() string { type SelfManagedActiveDirectoryAttributes struct { _ struct{} `type:"structure"` - // A list of up to two IP addresses of DNS servers or domain controllers in + // A list of up to three IP addresses of DNS servers or domain controllers in // the self-managed AD directory. DnsIps []*string `min:"1" type:"list"` @@ -11444,16 +11488,15 @@ func (s *SelfManagedActiveDirectoryAttributes) SetUserName(v string) *SelfManage return s } -// The configuration that Amazon FSx uses to join a Amazon FSx for Windows File -// Server file system or an ONTAP storage virtual machine (SVM) to a self-managed -// (including on-premises) Microsoft Active Directory (AD) directory. For more -// information, see Using Amazon FSx with your self-managed Microsoft Active -// Directory (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/self-managed-AD.html) +// The configuration that Amazon FSx uses to join a FSx for Windows File Server +// file system or an ONTAP storage virtual machine (SVM) to a self-managed (including +// on-premises) Microsoft Active Directory (AD) directory. For more information, +// see Using Amazon FSx with your self-managed Microsoft Active Directory (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/self-managed-AD.html) // or Managing SVMs (https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/managing-svms.html). type SelfManagedActiveDirectoryConfiguration struct { _ struct{} `type:"structure"` - // A list of up to two IP addresses of DNS servers or domain controllers in + // A list of up to three IP addresses of DNS servers or domain controllers in // the self-managed AD directory. // // DnsIps is a required field @@ -11604,7 +11647,7 @@ func (s *SelfManagedActiveDirectoryConfiguration) SetUserName(v string) *SelfMan type SelfManagedActiveDirectoryConfigurationUpdates struct { _ struct{} `type:"structure"` - // A list of up to two IP addresses of DNS servers or domain controllers in + // A list of up to three IP addresses of DNS servers or domain controllers in // the self-managed AD directory. DnsIps []*string `min:"1" type:"list"` diff --git a/service/kendra/api.go b/service/kendra/api.go index f82da346c68..33e2483e39d 100644 --- a/service/kendra/api.go +++ b/service/kendra/api.go @@ -445,11 +445,10 @@ func (c *Kendra) CreateDataSourceRequest(input *CreateDataSourceInput) (req *req // CreateDataSource API operation for AWSKendraFrontendService. // -// Creates a data source that you use to with an Amazon Kendra index. +// Creates a data source that you want to use with an Amazon Kendra index. // // You specify a name, data source connector type and description for your data -// source. You also specify configuration information such as document metadata -// (author, source URI, and so on) and user context information. +// source. You also specify configuration information for the data source connector. // // CreateDataSource is a synchronous operation. The operation returns 200 if // the data source was successfully created. Otherwise, an exception is raised. @@ -3046,12 +3045,15 @@ func (c *Kendra) PutPrincipalMappingRequest(input *PutPrincipalMappingInput) (re // PutPrincipalMapping API operation for AWSKendraFrontendService. // -// Maps users to their groups. You can also map sub groups to groups. For example, -// the group "Company Intellectual Property Teams" includes sub groups "Research" -// and "Engineering". 
These sub groups include their own list of users or people -// who work in these teams. Only users who work in research and engineering, -// and therefore belong in the intellectual property group, can see top-secret -// company documents in their search results. +// Maps users to their groups so that you only need to provide the user ID when +// you issue the query. +// +// You can also map sub groups to groups. For example, the group "Company Intellectual +// Property Teams" includes sub groups "Research" and "Engineering". These sub +// groups include their own list of users or people who work in these teams. +// Only users who work in research and engineering, and therefore belong in +// the intellectual property group, can see top-secret company documents in +// their search results. // // You map users to their groups when you want to filter search results for // different users based on their group’s access to documents. For more information @@ -4392,9 +4394,9 @@ func (s *AdditionalResultAttributeValue) SetTextWithHighlightsValue(v *TextWithH // If you use more than 2 layers, you receive a ValidationException exception // with the message "AttributeFilter cannot have a depth of more than 2." // -// If you use more than 10 attribute filters, you receive a ValidationException -// exception with the message "AttributeFilter cannot have a length of more -// than 10". +// If you use more than 10 attribute filters in a given list for AndAllFilters +// or OrAllFilters, you receive a ValidationException with the message "AttributeFilter +// cannot have a length of more than 10". type AttributeFilter struct { _ struct{} `type:"structure"` @@ -4413,19 +4415,19 @@ type AttributeFilter struct { EqualsTo *DocumentAttribute `type:"structure"` // Performs a greater than operation on two document attributes. Use with a - // document attribute of type Integer or Long. + // document attribute of type Date or Long. GreaterThan *DocumentAttribute `type:"structure"` // Performs a greater or equals than operation on two document attributes. Use - // with a document attribute of type Integer or Long. + // with a document attribute of type Date or Long. GreaterThanOrEquals *DocumentAttribute `type:"structure"` // Performs a less than operation on two document attributes. Use with a document - // attribute of type Integer or Long. + // attribute of type Date or Long. LessThan *DocumentAttribute `type:"structure"` // Performs a less than or equals operation on two document attributes. Use - // with a document attribute of type Integer or Long. + // with a document attribute of type Date or Long. LessThanOrEquals *DocumentAttribute `type:"structure"` // Performs a logical NOT operation on all supplied filters. @@ -7015,16 +7017,23 @@ type CreateIndexInput struct { // // ATTRIBUTE_FILTER // - // All indexed content is searchable and displayable for all users. If there - // is an access control list, it is ignored. You can filter on user and group - // attributes. + // All indexed content is searchable and displayable for all users. If you want + // to filter search results on user context, you can use the attribute filters + // of _user_id and _group_ids or you can provide user and group information + // in UserContext. // // USER_TOKEN // - // Enables SSO and token-based user access control. All documents with no access - // control and all documents accessible to the user will be searchable and displayable. + // Enables token-based user access control to filter search results on user + // context. 
All documents with no access control and all documents accessible + // to the user will be searchable and displayable. UserContextPolicy *string `type:"string" enum:"UserContextPolicy"` + // Enables fetching access levels of groups and users from an AWS Single Sign-On + // identity source. To configure this, see UserGroupResolutionConfiguration + // (https://docs.aws.amazon.com/kendra/latest/dg/API_UserGroupResolutionConfiguration.html). + UserGroupResolutionConfiguration *UserGroupResolutionConfiguration `type:"structure"` + // The user token configuration. UserTokenConfigurations []*UserTokenConfiguration `type:"list"` } @@ -7080,6 +7089,11 @@ func (s *CreateIndexInput) Validate() error { } } } + if s.UserGroupResolutionConfiguration != nil { + if err := s.UserGroupResolutionConfiguration.Validate(); err != nil { + invalidParams.AddNested("UserGroupResolutionConfiguration", err.(request.ErrInvalidParams)) + } + } if s.UserTokenConfigurations != nil { for i, v := range s.UserTokenConfigurations { if v == nil { @@ -7145,6 +7159,12 @@ func (s *CreateIndexInput) SetUserContextPolicy(v string) *CreateIndexInput { return s } +// SetUserGroupResolutionConfiguration sets the UserGroupResolutionConfiguration field's value. +func (s *CreateIndexInput) SetUserGroupResolutionConfiguration(v *UserGroupResolutionConfiguration) *CreateIndexInput { + s.UserGroupResolutionConfiguration = v + return s +} + // SetUserTokenConfigurations sets the UserTokenConfigurations field's value. func (s *CreateIndexInput) SetUserTokenConfigurations(v []*UserTokenConfiguration) *CreateIndexInput { s.UserTokenConfigurations = v @@ -7379,7 +7399,7 @@ type CreateThesaurusInput struct { // A token that you provide to identify the request to create a thesaurus. Multiple // calls to the CreateThesaurus operation with the same client token will create - // only one index. + // only one thesaurus. ClientToken *string `min:"1" type:"string" idempotencyToken:"true"` // The description for the new thesaurus. @@ -9449,6 +9469,10 @@ type DescribeIndexOutput struct { // The user context policy for the Amazon Kendra index. UserContextPolicy *string `type:"string" enum:"UserContextPolicy"` + // Shows whether you have enabled the configuration for fetching access levels + // of groups and users from an AWS Single Sign-On identity source. + UserGroupResolutionConfiguration *UserGroupResolutionConfiguration `type:"structure"` + // The user token configuration for the Amazon Kendra index. UserTokenConfigurations []*UserTokenConfiguration `type:"list"` } @@ -9555,6 +9579,12 @@ func (s *DescribeIndexOutput) SetUserContextPolicy(v string) *DescribeIndexOutpu return s } +// SetUserGroupResolutionConfiguration sets the UserGroupResolutionConfiguration field's value. +func (s *DescribeIndexOutput) SetUserGroupResolutionConfiguration(v *UserGroupResolutionConfiguration) *DescribeIndexOutput { + s.UserGroupResolutionConfiguration = v + return s +} + // SetUserTokenConfigurations sets the UserTokenConfigurations field's value. func (s *DescribeIndexOutput) SetUserTokenConfigurations(v []*UserTokenConfiguration) *DescribeIndexOutput { s.UserTokenConfigurations = v @@ -9785,7 +9815,7 @@ func (s *DescribeQuerySuggestionsBlockListInput) SetIndexId(v string) *DescribeQ type DescribeQuerySuggestionsBlockListOutput struct { _ struct{} `type:"structure"` - // Shows the date-time a block list for query suggestions was last created. + // Shows the date-time a block list for query suggestions was created. 
CreatedAt *time.Time `type:"timestamp"` // Shows the description for the block list. @@ -11449,6 +11479,11 @@ type GroupMembers struct { // for a group. Your sub groups can contain more than 1000 users, but the list // of sub groups that belong to a group (and/or users) must be no more than // 1000. + // + // You can download this example S3 file (https://docs.aws.amazon.com/kendra/latest/dg/samples/group_members.zip) + // that uses the correct format for listing group members. Note, dataSourceId + // is optional. The value of type for a group is always GROUP and for a user + // it is always USER. S3PathforGroupMembers *S3Path `type:"structure"` } @@ -12169,8 +12204,9 @@ type ListDataSourceSyncJobsInput struct { // results. MaxResults *int64 `min:"1" type:"integer"` - // If the result of the previous request to GetDataSourceSyncJobHistory was - // truncated, include the NextToken to fetch the next set of jobs. + // If the previous response was incomplete (because there is more data to retrieve), + // Amazon Kendra returns a pagination token in the response. You can use this + // pagination token to retrieve the next set of jobs. NextToken *string `min:"1" type:"string"` // When specified, the synchronization jobs returned in the list are limited @@ -12270,11 +12306,8 @@ type ListDataSourceSyncJobsOutput struct { // A history of synchronization jobs for the data source. History []*DataSourceSyncJob `type:"list"` - // The GetDataSourceSyncJobHistory operation returns a page of vocabularies - // at a time. The maximum size of the page is set by the MaxResults parameter. - // If there are more jobs in the list than the page size, Amazon Kendra returns - // the NextPage token. Include the token in the next request to the GetDataSourceSyncJobHistory - // operation to return in the next page of jobs. + // If the response is truncated, Amazon Kendra returns this token that you can + // use in the subsequent request to retrieve the next set of jobs. NextToken *string `min:"1" type:"string"` } @@ -12436,8 +12469,9 @@ type ListFaqsInput struct { // results in the list, this response contains only the actual results. MaxResults *int64 `min:"1" type:"integer"` - // If the result of the previous request to ListFaqs was truncated, include - // the NextToken to fetch the next set of FAQs. + // If the previous response was incomplete (because there is more data to retrieve), + // Amazon Kendra returns a pagination token in the response. You can use this + // pagination token to retrieve the next set of FAQs. NextToken *string `min:"1" type:"string"` } @@ -12505,11 +12539,8 @@ type ListFaqsOutput struct { // information about the FAQs associated with the specified index. FaqSummaryItems []*FaqSummary `type:"list"` - // The ListFaqs operation returns a page of FAQs at a time. The maximum size - // of the page is set by the MaxResults parameter. If there are more jobs in - // the list than the page size, Amazon Kendra returns the NextPage token. Include - // the token in the next request to the ListFaqs operation to return the next - // page of FAQs. + // If the response is truncated, Amazon Kendra returns this token that you can + // use in the subsequent request to retrieve the next set of FAQs. 
NextToken *string `min:"1" type:"string"` } @@ -12556,11 +12587,14 @@ type ListGroupsOlderThanOrderingIdInput struct { // IndexId is a required field IndexId *string `min:"36" type:"string" required:"true"` - // The maximum results shown for a list of groups that are mapped to users before - // a given ordering or timestamp identifier. + // The maximum number of returned groups that are mapped to users before a given + // ordering or timestamp identifier. MaxResults *int64 `min:"1" type:"integer"` - // The next items in the list of groups that go beyond the maximum. + // If the previous response was incomplete (because there is more data to retrieve), + // Amazon Kendra returns a pagination token in the response. You can use this + // pagination token to retrieve the next set of groups that are mapped to users + // before a given ordering or timestamp identifier. NextToken *string `min:"1" type:"string"` // The timestamp identifier used for the latest PUT or DELETE action for mapping @@ -12653,7 +12687,9 @@ type ListGroupsOlderThanOrderingIdOutput struct { // given ordering or timestamp identifier. GroupsSummaries []*GroupSummary `type:"list"` - // The next items in the list of groups that go beyond the maximum. + // If the response is truncated, Amazon Kendra returns this token that you can + // use in the subsequent request to retrieve the next set of groups that are + // mapped to users before a given ordering or timestamp identifier. NextToken *string `min:"1" type:"string"` } @@ -13076,7 +13112,7 @@ type ListThesauriOutput struct { // use in the subsequent request to retrieve the next set of thesauri. NextToken *string `min:"1" type:"string"` - // An array of summary information for one or more thesauruses. + // An array of summary information for a thesaurus or multiple thesauri. ThesaurusSummaryItems []*ThesaurusSummary `type:"list"` } @@ -13858,7 +13894,7 @@ type QueryInput struct { // relevance that Amazon Kendra determines for the result. SortingConfiguration *SortingConfiguration `type:"structure"` - // The user context token. + // The user context token or user and group information. UserContext *UserContext `type:"structure"` // Provides an identifier for a specific user. The VisitorId should be a unique @@ -17560,7 +17596,7 @@ func (s *TextWithHighlights) SetText(v string) *TextWithHighlights { return s } -// An array of summary information for one or more thesauruses. +// An array of summary information for a thesaurus or multiple thesauri. type ThesaurusSummary struct { _ struct{} `type:"structure"` @@ -17995,9 +18031,14 @@ type UpdateIndexInput struct { // CloudWatch logs. RoleArn *string `min:"1" type:"string"` - // The user user token context policy. + // The user context policy. UserContextPolicy *string `type:"string" enum:"UserContextPolicy"` + // Enables fetching access levels of groups and users from an AWS Single Sign-On + // identity source. To configure this, see UserGroupResolutionConfiguration + // (https://docs.aws.amazon.com/kendra/latest/dg/API_UserGroupResolutionConfiguration.html). + UserGroupResolutionConfiguration *UserGroupResolutionConfiguration `type:"structure"` + // The user token configuration. 
UserTokenConfigurations []*UserTokenConfiguration `type:"list"` } @@ -18050,6 +18091,11 @@ func (s *UpdateIndexInput) Validate() error { } } } + if s.UserGroupResolutionConfiguration != nil { + if err := s.UserGroupResolutionConfiguration.Validate(); err != nil { + invalidParams.AddNested("UserGroupResolutionConfiguration", err.(request.ErrInvalidParams)) + } + } if s.UserTokenConfigurations != nil { for i, v := range s.UserTokenConfigurations { if v == nil { @@ -18109,6 +18155,12 @@ func (s *UpdateIndexInput) SetUserContextPolicy(v string) *UpdateIndexInput { return s } +// SetUserGroupResolutionConfiguration sets the UserGroupResolutionConfiguration field's value. +func (s *UpdateIndexInput) SetUserGroupResolutionConfiguration(v *UserGroupResolutionConfiguration) *UpdateIndexInput { + s.UserGroupResolutionConfiguration = v + return s +} + // SetUserTokenConfigurations sets the UserTokenConfigurations field's value. func (s *UpdateIndexInput) SetUserTokenConfigurations(v []*UserTokenConfiguration) *UpdateIndexInput { s.UserTokenConfigurations = v @@ -18579,6 +18631,10 @@ func (s UpdateThesaurusOutput) GoString() string { // Provides the configuration information of the URLs to crawl. // +// You can only crawl websites that use the secure communication protocol, Hypertext +// Transfer Protocol Secure (HTTPS). If you receive an error when crawling a +// website, it could be that the website is blocked from crawling. +// // When selecting websites to index, you must adhere to the Amazon Acceptable // Use Policy (https://aws.amazon.com/aup/) and all other Amazon terms. Remember // that you must only use the Amazon Kendra web crawler to index your own webpages, @@ -18654,7 +18710,7 @@ func (s *Urls) SetSiteMapsConfiguration(v *SiteMapsConfiguration) *Urls { return s } -// Provides information about the user context for a Amazon Kendra index. +// Provides information about the user context for an Amazon Kendra index. // // This is used for filtering search results for different users based on their // access to documents. @@ -18663,8 +18719,8 @@ func (s *Urls) SetSiteMapsConfiguration(v *SiteMapsConfiguration) *Urls { // // * User token // -// * User ID, the groups the user belongs to, and the data sources the groups -// can access +// * User ID, the groups the user belongs to, and any data sources the groups +// can access. // // If you provide both, an exception is thrown. type UserContext struct { @@ -18761,6 +18817,66 @@ func (s *UserContext) SetUserId(v string) *UserContext { return s } +// Provides the configuration information to fetch access levels of groups and +// users from an AWS Single Sign-On identity source. This is useful for setting +// up user context filtering, where Amazon Kendra filters search results for +// different users based on their group's access to documents. You can also +// map your users to their groups for user context filtering using the PutPrincipalMapping +// operation (https://docs.aws.amazon.com/latest/dg/API_PutPrincipalMapping.html). +// +// To set up an AWS SSO identity source in the console to use with Amazon Kendra, +// see Getting started with an AWS SSO identity source (https://docs.aws.amazon.com/kendra/latest/dg/getting-started-aws-sso.html). +// You must also grant the required permissions to use AWS SSO with Amazon Kendra. +// For more information, see IAM roles for AWS Single Sign-On (https://docs.aws.amazon.com/kendra/latest/dg/iam-roles.html#iam-roles-aws-sso). 
+type UserGroupResolutionConfiguration struct { + _ struct{} `type:"structure"` + + // The identity store provider (mode) you want to use to fetch access levels + // of groups and users. AWS Single Sign-On is currently the only available mode. + // Your users and groups must exist in an AWS SSO identity source in order to + // use this mode. + // + // UserGroupResolutionMode is a required field + UserGroupResolutionMode *string `type:"string" required:"true" enum:"UserGroupResolutionMode"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UserGroupResolutionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UserGroupResolutionConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UserGroupResolutionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UserGroupResolutionConfiguration"} + if s.UserGroupResolutionMode == nil { + invalidParams.Add(request.NewErrParamRequired("UserGroupResolutionMode")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetUserGroupResolutionMode sets the UserGroupResolutionMode field's value. +func (s *UserGroupResolutionConfiguration) SetUserGroupResolutionMode(v string) *UserGroupResolutionConfiguration { + s.UserGroupResolutionMode = &v + return s +} + // Provides configuration information for a token configuration. type UserTokenConfiguration struct { _ struct{} `type:"structure"` @@ -18966,6 +19082,10 @@ type WebCrawlerConfiguration struct { // You can include website subdomains. You can list up to 100 seed URLs and // up to three sitemap URLs. // + // You can only crawl websites that use the secure communication protocol, Hypertext + // Transfer Protocol Secure (HTTPS). If you receive an error when crawling a + // website, it could be that the website is blocked from crawling. + // // When selecting websites to index, you must adhere to the Amazon Acceptable // Use Policy (https://aws.amazon.com/aup/) and all other Amazon terms. 
Remember // that you must only use the Amazon Kendra web crawler to index your own webpages, @@ -20244,6 +20364,22 @@ func UserContextPolicy_Values() []string { } } +const ( + // UserGroupResolutionModeAwsSso is a UserGroupResolutionMode enum value + UserGroupResolutionModeAwsSso = "AWS_SSO" + + // UserGroupResolutionModeNone is a UserGroupResolutionMode enum value + UserGroupResolutionModeNone = "NONE" +) + +// UserGroupResolutionMode_Values returns all elements of the UserGroupResolutionMode enum +func UserGroupResolutionMode_Values() []string { + return []string{ + UserGroupResolutionModeAwsSso, + UserGroupResolutionModeNone, + } +} + const ( // WebCrawlerModeHostOnly is a WebCrawlerMode enum value WebCrawlerModeHostOnly = "HOST_ONLY" diff --git a/service/sagemaker/api.go b/service/sagemaker/api.go index a3cb56ec320..c4d776cf718 100644 --- a/service/sagemaker/api.go +++ b/service/sagemaker/api.go @@ -1523,15 +1523,15 @@ func (c *SageMaker) CreateEndpointRequest(input *CreateEndpointInput) (req *requ // the CreateEndpoint and CreateEndpointConfig API operations, add the following // policies to the role. // -// * Option 1: For a full Amazon SageMaker access, search and attach the -// AmazonSageMakerFullAccess policy. +// * Option 1: For a full SageMaker access, search and attach the AmazonSageMakerFullAccess +// policy. // // * Option 2: For granting a limited access to an IAM role, paste the following // Action elements manually into the JSON file of the IAM role: "Action": // ["sagemaker:CreateEndpoint", "sagemaker:CreateEndpointConfig"] "Resource": // [ "arn:aws:sagemaker:region:account-id:endpoint/endpointName" "arn:aws:sagemaker:region:account-id:endpoint-config/endpointConfigName" -// ] For more information, see Amazon SageMaker API Permissions: Actions, -// Permissions, and Resources Reference (https://docs.aws.amazon.com/sagemaker/latest/dg/api-permissions-reference.html). +// ] For more information, see SageMaker API Permissions: Actions, Permissions, +// and Resources Reference (https://docs.aws.amazon.com/sagemaker/latest/dg/api-permissions-reference.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -23408,25 +23408,42 @@ type AlgorithmSpecification struct { // Amazon SageMaker (https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms.html). TrainingImage *string `type:"string"` - // The input mode that the algorithm supports. For the input modes that Amazon - // SageMaker algorithms support, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). - // If an algorithm supports the File input mode, Amazon SageMaker downloads - // the training data from S3 to the provisioned ML storage Volume, and mounts - // the directory to docker volume for training container. If an algorithm supports - // the Pipe input mode, Amazon SageMaker streams data directly from S3 to the - // container. + // The training input mode that the algorithm supports. For more information + // about input modes, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). + // + // Pipe mode + // + // If an algorithm supports Pipe mode, Amazon SageMaker streams data directly + // from Amazon S3 to the container. 
+ // + // File mode + // + // If an algorithm supports File mode, SageMaker downloads the training data + // from S3 to the provisioned ML storage volume, and mounts the directory to + // the Docker volume for the training container. // - // In File mode, make sure you provision ML storage volume with sufficient capacity - // to accommodate the data download from S3. In addition to the training data, - // the ML storage volume also stores the output model. The algorithm container - // use ML storage volume to also store intermediate information, if any. + // You must provision the ML storage volume with sufficient capacity to accommodate + // the data downloaded from S3. In addition to the training data, the ML storage + // volume also stores the output model. The algorithm container uses the ML + // storage volume to also store intermediate information, if any. // - // For distributed algorithms using File mode, training data is distributed - // uniformly, and your training duration is predictable if the input data objects - // size is approximately same. Amazon SageMaker does not split the files any - // further for model training. If the object sizes are skewed, training won't - // be optimal as the data distribution is also skewed where one host in a training - // cluster is overloaded, thus becoming bottleneck in training. + // For distributed algorithms, training data is distributed uniformly. Your + // training duration is predictable if the input data objects sizes are approximately + // the same. SageMaker does not split the files any further for model training. + // If the object sizes are skewed, training won't be optimal as the data distribution + // is also skewed when one host in a training cluster is overloaded, thus becoming + // a bottleneck in training. + // + // FastFile mode + // + // If an algorithm supports FastFile mode, SageMaker streams data directly from + // S3 to the container with no code changes, and provides file system access + // to the data. Users can author their training script to interact with these + // files as if they were stored on disk. + // + // FastFile mode works best when the data is read sequentially. Augmented manifest + // files aren't supported. The startup time is lower when there are fewer files + // in the S3 bucket provided. // // TrainingInputMode is a required field TrainingInputMode *string `type:"string" required:"true" enum:"TrainingInputMode"` @@ -25960,13 +25977,20 @@ type AutoMLJobCompletionCriteria struct { _ struct{} `type:"structure"` // The maximum runtime, in seconds, an AutoML job has to complete. + // + // If an AutoML job exceeds the maximum runtime, the job is stopped automatically + // and its processing is ended gracefully. The AutoML job identifies the best + // model whose training was completed and marks it as the best-performing model. + // Any unfinished steps of the job, such as automatic one-click Autopilot model + // deployment, will not be completed. MaxAutoMLJobRuntimeInSeconds *int64 `min:"1" type:"integer"` // The maximum number of times a training job is allowed to run. MaxCandidates *int64 `min:"1" type:"integer"` - // The maximum time, in seconds, a training job is allowed to run as part of - // an AutoML job. + // The maximum time, in seconds, that each training job is allowed to run as + // part of a hyperparameter tuning job. For more information, see the used by + // the action. 
MaxRuntimePerTrainingJobInSeconds *int64 `min:"1" type:"integer"` } @@ -31243,9 +31267,14 @@ type CreateFeatureGroupInput struct { // * The Amazon Simple Storage Service (Amazon S3) location of an OfflineStore. // // * A configuration for an Amazon Web Services Glue or Amazon Web Services - // Hive data cataolgue. + // Hive data catalog. // // * An KMS encryption key to encrypt the Amazon S3 location used for OfflineStore. + // If KMS encryption key is not specified, by default we encrypt all data + // at rest using Amazon Web Services KMS key. By defining your bucket-level + // key (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucket-key.html) + // for SSE, you can reduce Amazon Web Services KMS requests costs by up to + // 99 percent. // // To learn more about this parameter, see OfflineStoreConfig. OfflineStoreConfig *OfflineStoreConfig `type:"structure"` @@ -37230,17 +37259,17 @@ type DataProcessing struct { // to Input. You can specify OutputFilter as an additional filter to select // a portion of the joined dataset and store it in the output file. // - // For JSON or JSONLines objects, such as a JSON array, Amazon SageMaker adds - // the transformed data to the input JSON object in an attribute called SageMakerOutput. - // The joined result for JSON must be a key-value pair object. If the input - // is not a key-value pair object, Amazon SageMaker creates a new JSON file. - // In the new JSON file, and the input data is stored under the SageMakerInput - // key and the results are stored in SageMakerOutput. + // For JSON or JSONLines objects, such as a JSON array, SageMaker adds the transformed + // data to the input JSON object in an attribute called SageMakerOutput. The + // joined result for JSON must be a key-value pair object. If the input is not + // a key-value pair object, SageMaker creates a new JSON file. In the new JSON + // file, and the input data is stored under the SageMakerInput key and the results + // are stored in SageMakerOutput. // - // For CSV data, Amazon SageMaker takes each row as a JSON array and joins the - // transformed data with the input by appending each transformed row to the - // end of the input. The joined data has the original input data followed by - // the transformed data and the output is a CSV file. + // For CSV data, SageMaker takes each row as a JSON array and joins the transformed + // data with the input by appending each transformed row to the end of the input. + // The joined data has the original input data followed by the transformed data + // and the output is a CSV file. // // For information on how joining in applied, see Workflow for Associating Inferences // with Input Records (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform-data-processing.html#batch-transform-data-processing-workflow). @@ -41633,7 +41662,9 @@ type DescribeAppOutput struct { // The timestamp of the last health check. LastHealthCheckTimestamp *time.Time `type:"timestamp"` - // The timestamp of the last user's activity. + // The timestamp of the last user's activity. LastUserActivityTimestamp is also + // updated when SageMaker performs health checks without user activity. As a + // result, this value is set to the same value as LastHealthCheckTimestamp. LastUserActivityTimestamp *time.Time `type:"timestamp"` // The instance type and the Amazon Resource Name (ARN) of the SageMaker image @@ -48478,8 +48509,8 @@ type DescribeTrainingJobOutput struct { // time. 
// // Multiply BillableTimeInSeconds by the number of instances (InstanceCount) - // in your training cluster to get the total compute time Amazon SageMaker will - // bill you if you run distributed training. The formula is as follows: BillableTimeInSeconds + // in your training cluster to get the total compute time SageMaker will bill + // you if you run distributed training. The formula is as follows: BillableTimeInSeconds // * InstanceCount . // // You can calculate the savings from using managed spot training using the @@ -51337,9 +51368,9 @@ type EndpointInput struct { // by an S3 key. Defaults to FullyReplicated S3DataDistributionType *string `type:"string" enum:"ProcessingS3DataDistributionType"` - // Whether the Pipe or File is used as the input mode for transfering data for - // the monitoring job. Pipe mode is recommended for large datasets. File mode - // is useful for small files that fit in memory. Defaults to File. + // Whether the Pipe or File is used as the input mode for transferring data + // for the monitoring job. Pipe mode is recommended for large datasets. File + // mode is useful for small files that fit in memory. Defaults to File. S3InputMode *string `type:"string" enum:"ProcessingS3InputMode"` // If specified, monitoring jobs substract this time from the start time. For @@ -54797,18 +54828,42 @@ type HyperParameterAlgorithmSpecification struct { // Amazon SageMaker (https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms.html). TrainingImage *string `type:"string"` - // The input mode that the algorithm supports: File or Pipe. In File input mode, - // Amazon SageMaker downloads the training data from Amazon S3 to the storage - // volume that is attached to the training instance and mounts the directory - // to the Docker volume for the training container. In Pipe input mode, Amazon - // SageMaker streams data directly from Amazon S3 to the container. + // The training input mode that the algorithm supports. For more information + // about input modes, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). // - // If you specify File mode, make sure that you provision the storage volume - // that is attached to the training instance with enough capacity to accommodate - // the training data downloaded from Amazon S3, the model artifacts, and intermediate - // information. + // Pipe mode // - // For more information about input modes, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). + // If an algorithm supports Pipe mode, Amazon SageMaker streams data directly + // from Amazon S3 to the container. + // + // File mode + // + // If an algorithm supports File mode, SageMaker downloads the training data + // from S3 to the provisioned ML storage volume, and mounts the directory to + // the Docker volume for the training container. + // + // You must provision the ML storage volume with sufficient capacity to accommodate + // the data downloaded from S3. In addition to the training data, the ML storage + // volume also stores the output model. The algorithm container uses the ML + // storage volume to also store intermediate information, if any. + // + // For distributed algorithms, training data is distributed uniformly. Your + // training duration is predictable if the input data objects sizes are approximately + // the same. SageMaker does not split the files any further for model training. 
+	// If the object sizes are skewed, training won't be optimal because the data
+	// distribution is also skewed; one host in the training cluster becomes overloaded
+	// and acts as a bottleneck in training.
+	//
+	// FastFile mode
+	//
+	// If an algorithm supports FastFile mode, SageMaker streams data directly from
+	// S3 to the container with no code changes, and provides file system access
+	// to the data. Users can author their training script to interact with these
+	// files as if they were stored on disk.
+	//
+	// FastFile mode works best when the data is read sequentially. Augmented manifest
+	// files aren't supported. The startup time is lower when there are fewer files
+	// in the S3 bucket provided.
 	//
 	// TrainingInputMode is a required field
 	TrainingInputMode *string `type:"string" required:"true" enum:"TrainingInputMode"`
@@ -78916,11 +78971,10 @@ func (s StopTransformJobOutput) GoString() string {
 	return s.String()
 }
 
-// Specifies a limit to how long a model training job, model compilation job,
-// or hyperparameter tuning job can run. It also specifies how long a managed
-// Spot training job has to complete. When the job reaches the time limit, Amazon
-// SageMaker ends the training or compilation job. Use this API to cap model
-// training costs.
+// Specifies a limit to how long a model training job or model compilation job
+// can run. It also specifies how long a managed spot training job has to complete.
+// When the job reaches the time limit, Amazon SageMaker ends the training or
+// compilation job. Use this API to cap model training costs.
 //
 // To stop a training job, Amazon SageMaker sends the algorithm the SIGTERM
 // signal, which delays job termination for 120 seconds. Algorithms can use
@@ -79201,11 +79255,11 @@ func (s *SuggestionQuery) SetPropertyNameQuery(v *PropertyNameQuery) *Suggestion
 }
 
 // A tag object that consists of a key and an optional value, used to manage
-// metadata for Amazon SageMaker Amazon Web Services resources.
+// metadata for SageMaker Amazon Web Services resources.
 //
 // You can add tags to notebook instances, training jobs, hyperparameter tuning
 // jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations,
-// and endpoints. For more information on adding tags to Amazon SageMaker resources,
+// and endpoints. For more information on adding tags to SageMaker resources,
 // see AddTags.
 //
 // For more information on adding metadata to your Amazon Web Services resources
@@ -80033,14 +80087,42 @@ type TrainingJobDefinition struct {
 	// StoppingCondition is a required field
 	StoppingCondition *StoppingCondition `type:"structure" required:"true"`
 
-	// The input mode used by the algorithm for the training job. For the input
-	// modes that Amazon SageMaker algorithms support, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html).
+	// The training input mode that the algorithm supports. For more information
+	// about input modes, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html).
 	//
-	// If an algorithm supports the File input mode, Amazon SageMaker downloads
-	// the training data from S3 to the provisioned ML storage Volume, and mounts
-	// the directory to docker volume for training container. If an algorithm supports
-	// the Pipe input mode, Amazon SageMaker streams data directly from S3 to the
-	// container.
+	// Pipe mode
+	//
+	// If an algorithm supports Pipe mode, Amazon SageMaker streams data directly
+	// from Amazon S3 to the container.
+	//
+	// File mode
+	//
+	// If an algorithm supports File mode, SageMaker downloads the training data
+	// from S3 to the provisioned ML storage volume, and mounts the directory to
+	// the Docker volume for the training container.
+	//
+	// You must provision the ML storage volume with sufficient capacity to accommodate
+	// the data downloaded from S3. In addition to the training data, the ML storage
+	// volume also stores the output model. The algorithm container uses the ML
+	// storage volume to also store intermediate information, if any.
+	//
+	// For distributed algorithms, training data is distributed uniformly. Your
+	// training duration is predictable if the input data object sizes are approximately
+	// the same. SageMaker does not split the files any further for model training.
+	// If the object sizes are skewed, training won't be optimal because the data
+	// distribution is also skewed; one host in the training cluster becomes overloaded
+	// and acts as a bottleneck in training.
+	//
+	// FastFile mode
+	//
+	// If an algorithm supports FastFile mode, SageMaker streams data directly from
+	// S3 to the container with no code changes, and provides file system access
+	// to the data. Users can author their training script to interact with these
+	// files as if they were stored on disk.
+	//
+	// FastFile mode works best when the data is read sequentially. Augmented manifest
+	// files aren't supported. The startup time is lower when there are fewer files
+	// in the S3 bucket provided.
 	//
 	// TrainingInputMode is a required field
 	TrainingInputMode *string `type:"string" required:"true" enum:"TrainingInputMode"`
@@ -90637,12 +90719,51 @@ func TrafficRoutingConfigType_Values() []string {
 	}
 }
 
+// The training input mode that the algorithm supports. For more information
+// about input modes, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html).
+//
+// Pipe mode
+//
+// If an algorithm supports Pipe mode, Amazon SageMaker streams data directly
+// from Amazon S3 to the container.
+//
+// File mode
+//
+// If an algorithm supports File mode, SageMaker downloads the training data
+// from S3 to the provisioned ML storage volume, and mounts the directory to
+// the Docker volume for the training container.
+//
+// You must provision the ML storage volume with sufficient capacity to accommodate
+// the data downloaded from S3. In addition to the training data, the ML storage
+// volume also stores the output model. The algorithm container uses the ML
+// storage volume to also store intermediate information, if any.
+//
+// For distributed algorithms, training data is distributed uniformly. Your
+// training duration is predictable if the input data object sizes are approximately
+// the same. SageMaker does not split the files any further for model training.
+// If the object sizes are skewed, training won't be optimal because the data
+// distribution is also skewed; one host in the training cluster becomes overloaded
+// and acts as a bottleneck in training.
+//
+// FastFile mode
+//
+// If an algorithm supports FastFile mode, SageMaker streams data directly from
+// S3 to the container with no code changes, and provides file system access
+// to the data. Users can author their training script to interact with these
+// files as if they were stored on disk.
+//
+// FastFile mode works best when the data is read sequentially. Augmented manifest
+// files aren't supported. The startup time is lower when there are fewer files
+// in the S3 bucket provided.
 const (
 	// TrainingInputModePipe is a TrainingInputMode enum value
 	TrainingInputModePipe = "Pipe"
 
 	// TrainingInputModeFile is a TrainingInputMode enum value
 	TrainingInputModeFile = "File"
+
+	// TrainingInputModeFastFile is a TrainingInputMode enum value
+	TrainingInputModeFastFile = "FastFile"
 )
 
 // TrainingInputMode_Values returns all elements of the TrainingInputMode enum
@@ -90650,6 +90771,7 @@ func TrainingInputMode_Values() []string {
 	return []string{
 		TrainingInputModePipe,
 		TrainingInputModeFile,
+		TrainingInputModeFastFile,
 	}
}
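
### Usage notes

The headline SageMaker change in this release is the new FastFile training input mode added to the `TrainingInputMode` enum above. A minimal sketch of starting a training job with the new `TrainingInputModeFastFile` constant; the job name, role ARN, ECR image, instance type, and S3 URIs are invented placeholders, not values from this release:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := sagemaker.New(sess)

	// All names, ARNs, and S3 URIs below are placeholders.
	out, err := svc.CreateTrainingJob(&sagemaker.CreateTrainingJobInput{
		TrainingJobName: aws.String("my-fastfile-job"),
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/MySageMakerRole"),
		AlgorithmSpecification: &sagemaker.AlgorithmSpecification{
			TrainingImage: aws.String("123456789012.dkr.ecr.us-east-1.amazonaws.com/my-training-image:latest"),
			// The new enum value from this release: stream from S3 with
			// file system access and no training-script changes.
			TrainingInputMode: aws.String(sagemaker.TrainingInputModeFastFile),
		},
		InputDataConfig: []*sagemaker.Channel{{
			ChannelName: aws.String("train"),
			DataSource: &sagemaker.DataSource{
				S3DataSource: &sagemaker.S3DataSource{
					S3DataType: aws.String(sagemaker.S3DataTypeS3Prefix),
					S3Uri:      aws.String("s3://my-bucket/training-data/"),
				},
			},
		}},
		OutputDataConfig: &sagemaker.OutputDataConfig{
			S3OutputPath: aws.String("s3://my-bucket/output/"),
		},
		ResourceConfig: &sagemaker.ResourceConfig{
			InstanceType:   aws.String("ml.m5.xlarge"),
			InstanceCount:  aws.Int64(1),
			VolumeSizeInGB: aws.Int64(50),
		},
		// StoppingCondition caps training or compilation time, per the
		// reworded doc comment above.
		StoppingCondition: &sagemaker.StoppingCondition{
			MaxRuntimeInSeconds: aws.Int64(3600),
		},
	})
	if err != nil {
		fmt.Println("CreateTrainingJob failed:", err)
		return
	}
	fmt.Println("started:", aws.StringValue(out.TrainingJobArn))
}
```

Note that `Channel` also exposes a per-channel `InputMode`; when set, it overrides the `AlgorithmSpecification`-level value for that channel.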
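The `BillableTimeInSeconds` comment reworded above restates the distributed-training billing formula, BillableTimeInSeconds * InstanceCount, and points at the managed spot savings calculation, (1 - BillableTimeInSeconds / TrainingTimeInSeconds) * 100, from the SageMaker documentation. A small sketch of both, using only fields that `DescribeTrainingJobOutput` already carries; the numbers in `main` are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

// totalBillableSeconds multiplies BillableTimeInSeconds by InstanceCount,
// the formula given in the DescribeTrainingJobOutput documentation.
func totalBillableSeconds(out *sagemaker.DescribeTrainingJobOutput) int64 {
	if out == nil || out.BillableTimeInSeconds == nil ||
		out.ResourceConfig == nil || out.ResourceConfig.InstanceCount == nil {
		return 0
	}
	return aws.Int64Value(out.BillableTimeInSeconds) *
		aws.Int64Value(out.ResourceConfig.InstanceCount)
}

// spotSavingsPercent applies (1 - BillableTimeInSeconds/TrainingTimeInSeconds)*100,
// the managed spot training savings formula the same comment refers to.
func spotSavingsPercent(out *sagemaker.DescribeTrainingJobOutput) float64 {
	if out == nil || out.BillableTimeInSeconds == nil ||
		out.TrainingTimeInSeconds == nil || aws.Int64Value(out.TrainingTimeInSeconds) == 0 {
		return 0
	}
	billable := float64(aws.Int64Value(out.BillableTimeInSeconds))
	training := float64(aws.Int64Value(out.TrainingTimeInSeconds))
	return (1 - billable/training) * 100
}

func main() {
	// Hypothetical values: 1,000 billable seconds on 4 instances,
	// 2,000 wall-clock training seconds under managed spot.
	out := &sagemaker.DescribeTrainingJobOutput{
		BillableTimeInSeconds: aws.Int64(1000),
		TrainingTimeInSeconds: aws.Int64(2000),
		ResourceConfig:        &sagemaker.ResourceConfig{InstanceCount: aws.Int64(4)},
	}
	fmt.Println(totalBillableSeconds(out)) // 4000
	fmt.Println(spotSavingsPercent(out))   // 50
}
```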
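For the OfflineStore encryption note added to `CreateFeatureGroupInput`, here is a sketch of supplying a customer managed KMS key through `OfflineStoreConfig.S3StorageConfig`; the feature group name, feature schema, role ARN, key ARN, and bucket are all assumed placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := sagemaker.New(sess)

	// All identifiers below are placeholders.
	_, err := svc.CreateFeatureGroup(&sagemaker.CreateFeatureGroupInput{
		FeatureGroupName:            aws.String("my-feature-group"),
		RecordIdentifierFeatureName: aws.String("record_id"),
		EventTimeFeatureName:        aws.String("event_time"),
		FeatureDefinitions: []*sagemaker.FeatureDefinition{
			{FeatureName: aws.String("record_id"), FeatureType: aws.String(sagemaker.FeatureTypeString)},
			{FeatureName: aws.String("event_time"), FeatureType: aws.String(sagemaker.FeatureTypeString)},
		},
		RoleArn: aws.String("arn:aws:iam::123456789012:role/MyFeatureStoreRole"),
		OfflineStoreConfig: &sagemaker.OfflineStoreConfig{
			S3StorageConfig: &sagemaker.S3StorageConfig{
				S3Uri: aws.String("s3://my-offline-store-bucket/prefix"),
				// Optional customer managed key. Per the documentation above,
				// if this is omitted, data at rest is still encrypted by
				// default with an Amazon Web Services KMS key.
				KmsKeyId: aws.String("arn:aws:kms:us-east-1:123456789012:key/11111111-2222-3333-4444-555555555555"),
			},
		},
	})
	if err != nil {
		fmt.Println("CreateFeatureGroup failed:", err)
	}
}
```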
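The reworded `DataProcessing` comment describes how batch transform joins results with input: JSON and JSONLines records gain a SageMakerOutput attribute, while CSV rows get the transformed columns appended. A sketch of a transform job that requests that join and then trims the joined record with a JSONPath OutputFilter; the model name, S3 URIs, and the "$['id','SageMakerOutput']" filter are illustrative assumptions:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := sagemaker.New(sess)

	// All names and URIs below are placeholders.
	_, err := svc.CreateTransformJob(&sagemaker.CreateTransformJobInput{
		TransformJobName: aws.String("my-join-transform"),
		ModelName:        aws.String("my-model"),
		TransformInput: &sagemaker.TransformInput{
			ContentType: aws.String("application/jsonlines"),
			SplitType:   aws.String(sagemaker.SplitTypeLine),
			DataSource: &sagemaker.TransformDataSource{
				S3DataSource: &sagemaker.TransformS3DataSource{
					S3DataType: aws.String(sagemaker.S3DataTypeS3Prefix),
					S3Uri:      aws.String("s3://my-bucket/input/"),
				},
			},
		},
		TransformOutput: &sagemaker.TransformOutput{
			S3OutputPath: aws.String("s3://my-bucket/output/"),
			AssembleWith: aws.String(sagemaker.AssemblyTypeLine),
		},
		TransformResources: &sagemaker.TransformResources{
			InstanceType:  aws.String("ml.m5.xlarge"),
			InstanceCount: aws.Int64(1),
		},
		DataProcessing: &sagemaker.DataProcessing{
			// Join each inference with its source record; for JSONLines the
			// result carries the prediction under a SageMakerOutput attribute,
			// as described in the DataProcessing documentation above.
			JoinSource: aws.String(sagemaker.JoinSourceInput),
			// Optionally keep only part of the joined record (JSONPath).
			OutputFilter: aws.String("$['id','SageMakerOutput']"),
		},
	})
	if err != nil {
		fmt.Println("CreateTransformJob failed:", err)
	}
}
```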