diff --git a/CHANGELOG.md b/CHANGELOG.md index 8b4f760f4c0..cf74a385caf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,14 @@ +Release v1.40.57 (2021-10-06) +=== + +### Service Client Updates +* `service/amplifybackend`: Updates service API and documentation +* `service/fsx`: Updates service API and documentation +* `service/kendra`: Updates service API and documentation + * Amazon Kendra now supports integration with AWS SSO +* `service/sagemaker`: Updates service API and documentation + * This release adds a new TrainingInputMode FastFile for SageMaker Training APIs. + Release v1.40.56 (2021-10-05) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index 4de91f08ccf..b91240af426 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -2075,6 +2075,30 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "databrew": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "dataexchange": service{ Endpoints: endpoints{ @@ -4245,6 +4269,36 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "kendra": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "kendra-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "kendra-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "kendra-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "kinesis": service{ Endpoints: endpoints{ @@ -5314,6 +5368,12 @@ var awsPartition = partition{ Region: "eu-west-3", }, }, + "sa-east-1": endpoint{ + Hostname: "oidc.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, "us-east-1": endpoint{ Hostname: "oidc.us-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -8373,6 +8433,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "databrew": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "dax": service{ Endpoints: endpoints{ @@ -9616,6 +9683,12 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "databrew": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "datasync": service{ Endpoints: endpoints{ diff --git a/aws/version.go b/aws/version.go index 1f0662dade5..c360ba46163 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.40.56" +const SDKVersion = "1.40.57" diff --git a/models/apis/amplifybackend/2020-08-11/api-2.json b/models/apis/amplifybackend/2020-08-11/api-2.json index 
b6a98a469e2..d7d5cc81a1e 100644 --- a/models/apis/amplifybackend/2020-08-11/api-2.json +++ b/models/apis/amplifybackend/2020-08-11/api-2.json @@ -2097,6 +2097,10 @@ "GetBackendRespObj" : { "type" : "structure", "members" : { + "AmplifyFeatureFlags" : { + "shape" : "__string", + "locationName" : "amplifyFeatureFlags" + }, "AmplifyMetaConfig" : { "shape" : "__string", "locationName" : "amplifyMetaConfig" @@ -2127,6 +2131,10 @@ "GetBackendResponse" : { "type" : "structure", "members" : { + "AmplifyFeatureFlags" : { + "shape" : "__string", + "locationName" : "amplifyFeatureFlags" + }, "AmplifyMetaConfig" : { "shape" : "__string", "locationName" : "amplifyMetaConfig" diff --git a/models/apis/amplifybackend/2020-08-11/docs-2.json b/models/apis/amplifybackend/2020-08-11/docs-2.json index eea578b8391..799944a2c6a 100644 --- a/models/apis/amplifybackend/2020-08-11/docs-2.json +++ b/models/apis/amplifybackend/2020-08-11/docs-2.json @@ -621,6 +621,7 @@ "GetBackendAuthRespObj$Error" : "
If the request fails, this error is returned.
", "GetBackendAuthRespObj$ResourceName" : "The name of this resource.
", "GetBackendReqObj$BackendEnvironmentName" : "The name of the backend environment.
", + "GetBackendRespObj$AmplifyFeatureFlags" : "A stringified version of the cli.json file for your Amplify project.
", "GetBackendRespObj$AmplifyMetaConfig" : "A stringified version of the current configs for your Amplify project.
", "GetBackendRespObj$AppId" : "The app ID.
", "GetBackendRespObj$AppName" : "The name of the app.
", diff --git a/models/apis/fsx/2018-03-01/api-2.json b/models/apis/fsx/2018-03-01/api-2.json index f8bae20e685..553605bb7a0 100644 --- a/models/apis/fsx/2018-03-01/api-2.json +++ b/models/apis/fsx/2018-03-01/api-2.json @@ -847,7 +847,8 @@ "WindowsConfiguration":{"shape":"CreateFileSystemWindowsConfiguration"}, "LustreConfiguration":{"shape":"CreateFileSystemLustreConfiguration"}, "StorageType":{"shape":"StorageType"}, - "KmsKeyId":{"shape":"KmsKeyId"} + "KmsKeyId":{"shape":"KmsKeyId"}, + "FileSystemTypeVersion":{"shape":"FileSystemTypeVersion"} } }, "CreateFileSystemFromBackupResponse":{ @@ -913,7 +914,8 @@ "KmsKeyId":{"shape":"KmsKeyId"}, "WindowsConfiguration":{"shape":"CreateFileSystemWindowsConfiguration"}, "LustreConfiguration":{"shape":"CreateFileSystemLustreConfiguration"}, - "OntapConfiguration":{"shape":"CreateFileSystemOntapConfiguration"} + "OntapConfiguration":{"shape":"CreateFileSystemOntapConfiguration"}, + "FileSystemTypeVersion":{"shape":"FileSystemTypeVersion"} } }, "CreateFileSystemResponse":{ @@ -1518,7 +1520,8 @@ "WindowsConfiguration":{"shape":"WindowsFileSystemConfiguration"}, "LustreConfiguration":{"shape":"LustreFileSystemConfiguration"}, "AdministrativeActions":{"shape":"AdministrativeActions"}, - "OntapConfiguration":{"shape":"OntapFileSystemConfiguration"} + "OntapConfiguration":{"shape":"OntapFileSystemConfiguration"}, + "FileSystemTypeVersion":{"shape":"FileSystemTypeVersion"} } }, "FileSystemAdministratorsGroupName":{ @@ -1596,6 +1599,12 @@ "ONTAP" ] }, + "FileSystemTypeVersion":{ + "type":"string", + "max":20, + "min":1, + "pattern":"^[0-9](\\.[0-9]*)*$" + }, "FileSystems":{ "type":"list", "member":{"shape":"FileSystem"}, diff --git a/models/apis/fsx/2018-03-01/docs-2.json b/models/apis/fsx/2018-03-01/docs-2.json index 5d59649afb8..b7f63bae463 100644 --- a/models/apis/fsx/2018-03-01/docs-2.json +++ b/models/apis/fsx/2018-03-01/docs-2.json @@ -756,9 +756,9 @@ "DnsIps": { "base": null, "refs": { - "SelfManagedActiveDirectoryAttributes$DnsIps": "A list of up to two IP addresses of DNS servers or domain controllers in the self-managed AD directory.
", - "SelfManagedActiveDirectoryConfiguration$DnsIps": "A list of up to two IP addresses of DNS servers or domain controllers in the self-managed AD directory.
", - "SelfManagedActiveDirectoryConfigurationUpdates$DnsIps": "A list of up to two IP addresses of DNS servers or domain controllers in the self-managed AD directory.
" + "SelfManagedActiveDirectoryAttributes$DnsIps": "A list of up to three IP addresses of DNS servers or domain controllers in the self-managed AD directory.
", + "SelfManagedActiveDirectoryConfiguration$DnsIps": "A list of up to three IP addresses of DNS servers or domain controllers in the self-managed AD directory.
", + "SelfManagedActiveDirectoryConfigurationUpdates$DnsIps": "A list of up to three IP addresses of DNS servers or domain controllers in the self-managed AD directory.
" } }, "DriveCacheType": { @@ -915,6 +915,14 @@ "FileSystem$FileSystemType": "The type of Amazon FSx file system, which can be LUSTRE
, WINDOWS
, or ONTAP
.
Sets the version for the Amazon FSx for Lustre file system you're creating from a backup. Valid values are 2.10
and 2.12
.
You don't need to specify FileSystemTypeVersion
because it will be applied using the backup's FileSystemTypeVersion
setting. If you choose to specify FileSystemTypeVersion
when creating from backup, the value must match the backup's FileSystemTypeVersion
setting.
Sets the version of the Amazon FSx for Lustre file system you're creating. Valid values are 2.10
and 2.12
.
Set the value to 2.10
to create a Lustre 2.10 file system.
Set the value to 2.12
to create a Lustre 2.12 file system.
Default value is 2.10
.
The version of your Amazon FSx for Lustre file system, either 2.10
or 2.12
.
A list of file systems.
", "refs": { @@ -1356,7 +1364,7 @@ } }, "SelfManagedActiveDirectoryConfiguration": { - "base": "The configuration that Amazon FSx uses to join a Amazon FSx for Windows File Server file system or an ONTAP storage virtual machine (SVM) to a self-managed (including on-premises) Microsoft Active Directory (AD) directory. For more information, see Using Amazon FSx with your self-managed Microsoft Active Directory or Managing SVMs.
", + "base": "The configuration that Amazon FSx uses to join a FSx for Windows File Server file system or an ONTAP storage virtual machine (SVM) to a self-managed (including on-premises) Microsoft Active Directory (AD) directory. For more information, see Using Amazon FSx with your self-managed Microsoft Active Directory or Managing SVMs.
", "refs": { "CreateFileSystemWindowsConfiguration$SelfManagedActiveDirectoryConfiguration": null, "CreateSvmActiveDirectoryConfiguration$SelfManagedActiveDirectoryConfiguration": null diff --git a/models/apis/kendra/2019-02-03/api-2.json b/models/apis/kendra/2019-02-03/api-2.json index 6d1cffbe1dc..f0dd8ccdfe6 100644 --- a/models/apis/kendra/2019-02-03/api-2.json +++ b/models/apis/kendra/2019-02-03/api-2.json @@ -1279,7 +1279,8 @@ }, "Tags":{"shape":"TagList"}, "UserTokenConfigurations":{"shape":"UserTokenConfigurationList"}, - "UserContextPolicy":{"shape":"UserContextPolicy"} + "UserContextPolicy":{"shape":"UserContextPolicy"}, + "UserGroupResolutionConfiguration":{"shape":"UserGroupResolutionConfiguration"} } }, "CreateIndexResponse":{ @@ -1716,7 +1717,8 @@ "ErrorMessage":{"shape":"ErrorMessage"}, "CapacityUnits":{"shape":"CapacityUnitsConfiguration"}, "UserTokenConfigurations":{"shape":"UserTokenConfigurationList"}, - "UserContextPolicy":{"shape":"UserContextPolicy"} + "UserContextPolicy":{"shape":"UserContextPolicy"}, + "UserGroupResolutionConfiguration":{"shape":"UserGroupResolutionConfiguration"} } }, "DescribePrincipalMappingRequest":{ @@ -3657,7 +3659,8 @@ "DocumentMetadataConfigurationUpdates":{"shape":"DocumentMetadataConfigurationList"}, "CapacityUnits":{"shape":"CapacityUnitsConfiguration"}, "UserTokenConfigurations":{"shape":"UserTokenConfigurationList"}, - "UserContextPolicy":{"shape":"UserContextPolicy"} + "UserContextPolicy":{"shape":"UserContextPolicy"}, + "UserGroupResolutionConfiguration":{"shape":"UserGroupResolutionConfiguration"} } }, "UpdateQuerySuggestionsBlockListRequest":{ @@ -3737,6 +3740,20 @@ "USER_TOKEN" ] }, + "UserGroupResolutionConfiguration":{ + "type":"structure", + "required":["UserGroupResolutionMode"], + "members":{ + "UserGroupResolutionMode":{"shape":"UserGroupResolutionMode"} + } + }, + "UserGroupResolutionMode":{ + "type":"string", + "enum":[ + "AWS_SSO", + "NONE" + ] + }, "UserId":{ "type":"string", "max":1024, diff --git a/models/apis/kendra/2019-02-03/docs-2.json b/models/apis/kendra/2019-02-03/docs-2.json index e1db3fc9e8e..83e6ae16f1c 100644 --- a/models/apis/kendra/2019-02-03/docs-2.json +++ b/models/apis/kendra/2019-02-03/docs-2.json @@ -6,7 +6,7 @@ "BatchGetDocumentStatus": "Returns the indexing status for one or more documents submitted with the BatchPutDocument operation.
When you use the BatchPutDocument
operation, documents are indexed asynchronously. You can use the BatchGetDocumentStatus
operation to get the current status of a list of documents so that you can determine if they have been successfully indexed.
You can also use the BatchGetDocumentStatus
operation to check the status of the BatchDeleteDocument operation. When a document is deleted from the index, Amazon Kendra returns NOT_FOUND
as the status.
Adds one or more documents to an index.
The BatchPutDocument
operation enables you to ingest inline documents or a set of documents stored in an Amazon S3 bucket. Use this operation to ingest your text and unstructured text into an index, add custom attributes to the documents, and to attach an access control list to the documents added to the index.
The documents are indexed asynchronously. You can see the progress of the batch using Amazon Web Services CloudWatch. Any error messages related to processing the batch are sent to your Amazon Web Services CloudWatch log.
", "ClearQuerySuggestions": "Clears existing query suggestions from an index.
This deletes existing suggestions only, not the queries in the query log. After you clear suggestions, Amazon Kendra learns new suggestions based on new queries added to the query log from the time you cleared suggestions. If you do not see any new suggestions, then please allow Amazon Kendra to collect enough queries to learn new suggestions.
", - "CreateDataSource": "Creates a data source that you use to with an Amazon Kendra index.
You specify a name, data source connector type and description for your data source. You also specify configuration information such as document metadata (author, source URI, and so on) and user context information.
CreateDataSource
is a synchronous operation. The operation returns 200 if the data source was successfully created. Otherwise, an exception is raised.
Creates a data source that you want to use with an Amazon Kendra index.
You specify a name, data source connector type and description for your data source. You also specify configuration information for the data source connector.
CreateDataSource
is a synchronous operation. The operation returns 200 if the data source was successfully created. Otherwise, an exception is raised.
Creates a new set of frequently asked questions (FAQs) and answers.
", "CreateIndex": "Creates a new Amazon Kendra index. Index creation is an asynchronous operation. To determine if index creation has completed, check the Status
field returned from a call to DescribeIndex
. The Status
field is set to ACTIVE
when the index is ready to use.
Once the index is active, you can index your documents using the BatchPutDocument
operation or using one of the supported data sources.
Creates a block list to exclude certain queries from suggestions.
Any query that contains words or phrases specified in the block list is blocked or filtered out from being shown as a suggestion.
You need to provide the file location of your block list text file in your S3 bucket. In your text file, enter each block word or phrase on a separate line.
For information on the current quota limits for block lists, see Quotas for Amazon Kendra.
", @@ -33,7 +33,7 @@ "ListQuerySuggestionsBlockLists": "Lists the block lists used for query suggestions for an index.
For information on the current quota limits for block lists, see Quotas for Amazon Kendra.
", "ListTagsForResource": "Gets a list of tags associated with a specified resource. Indexes, FAQs, and data sources can have tags associated with them.
", "ListThesauri": "Lists the Amazon Kendra thesauri associated with an index.
", - "PutPrincipalMapping": "Maps users to their groups. You can also map sub groups to groups. For example, the group \"Company Intellectual Property Teams\" includes sub groups \"Research\" and \"Engineering\". These sub groups include their own list of users or people who work in these teams. Only users who work in research and engineering, and therefore belong in the intellectual property group, can see top-secret company documents in their search results.
You map users to their groups when you want to filter search results for different users based on their group’s access to documents. For more information on filtering search results for different users, see Filtering on user context.
If more than five PUT
actions for a group are currently processing, a validation exception is thrown.
Maps users to their groups so that you only need to provide the user ID when you issue the query.
You can also map sub groups to groups. For example, the group \"Company Intellectual Property Teams\" includes sub groups \"Research\" and \"Engineering\". These sub groups include their own list of users or people who work in these teams. Only users who work in research and engineering, and therefore belong in the intellectual property group, can see top-secret company documents in their search results.
You map users to their groups when you want to filter search results for different users based on their group’s access to documents. For more information on filtering search results for different users, see Filtering on user context.
If more than five PUT
actions for a group are currently processing, a validation exception is thrown.
Searches an active index. Use this API to search your documents using a query. The Query
operation enables you to do faceted search and to filter results based on document attributes.
It also enables you to provide user context that Amazon Kendra uses to enforce document access control in the search results.
Amazon Kendra searches your index for text content and question and answer (FAQ) content. By default the response contains three types of results.
Relevant passages
Matching FAQs
Relevant documents
You can specify that the query return only one type of result using the QueryResultTypeConfig
parameter.
Each query returns the 100 most relevant results.
", "StartDataSourceSyncJob": "Starts a synchronization job for a data source. If a synchronization job is already in progress, Amazon Kendra returns a ResourceInUseException
exception.
Stops a running synchronization job. You can't stop a scheduled synchronization job.
", @@ -97,7 +97,7 @@ } }, "AttributeFilter": { - "base": "Provides filtering the query results based on document attributes.
When you use the AndAllFilters
or OrAllFilters
, filters you can use 2 layers under the first attribute filter. For example, you can use:
<AndAllFilters>
<OrAllFilters>
<EqualTo>
If you use more than 2 layers, you receive a ValidationException
exception with the message \"AttributeFilter
cannot have a depth of more than 2.\"
If you use more than 10 attribute filters, you receive a ValidationException
exception with the message \"AttributeFilter
cannot have a length of more than 10\".
Provides filtering of the query results based on document attributes.
When you use the AndAllFilters
or OrAllFilters
, you can nest filters up to 2 layers under the first attribute filter. For example, you can use:
<AndAllFilters>
<OrAllFilters>
<EqualTo>
If you use more than 2 layers, you receive a ValidationException
exception with the message \"AttributeFilter
cannot have a depth of more than 2.\"
If you use more than 10 attribute filters in a given list for AndAllFilters
or OrAllFilters
, you receive a ValidationException
with the message \"AttributeFilter
cannot have a length of more than 10\".
Performs a logical NOT
operation on all supplied filters.
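A minimal sketch of the two-layer rule above, mirroring the documented <AndAllFilters><OrAllFilters><EqualTo> shape; the index ID and the department attribute are hypothetical.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kendra"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := kendra.New(sess)

	// AndAllFilters is layer 1; the OrAllFilters list nested under it is
	// layer 2. Nesting any deeper triggers the ValidationException above.
	filter := &kendra.AttributeFilter{
		AndAllFilters: []*kendra.AttributeFilter{
			{EqualTo: &kendra.DocumentAttribute{
				Key:   aws.String("_language_code"),
				Value: &kendra.DocumentAttributeValue{StringValue: aws.String("en")},
			}},
			{OrAllFilters: []*kendra.AttributeFilter{
				{EqualTo: &kendra.DocumentAttribute{
					Key:   aws.String("department"), // hypothetical custom attribute
					Value: &kendra.DocumentAttributeValue{StringValue: aws.String("Legal")},
				}},
				{EqualTo: &kendra.DocumentAttribute{
					Key:   aws.String("department"),
					Value: &kendra.DocumentAttributeValue{StringValue: aws.String("HR")},
				}},
			}},
		},
	}

	out, err := svc.Query(&kendra.QueryInput{
		IndexId:         aws.String("0123abcd-example-index-id"), // placeholder
		QueryText:       aws.String("vacation policy"),
		AttributeFilter: filter,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(out.ResultItems), "results")
}
```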
A token that you provide to identify the request to create a FAQ. Multiple calls to the CreateFaqRequest
operation with the same client token will create only one FAQ.
A token that you provide to identify the request to create an index. Multiple calls to the CreateIndex
operation with the same client token will create only one index.
A token that you provide to identify the request to create a query suggestions block list.
", - "CreateThesaurusRequest$ClientToken": "A token that you provide to identify the request to create a thesaurus. Multiple calls to the CreateThesaurus
operation with the same client token will create only one index.
A token that you provide to identify the request to create a thesaurus. Multiple calls to the CreateThesaurus
operation with the same client token will create only one thesaurus.
Performs an equals operation on two document attributes.
", "AttributeFilter$ContainsAll": "Returns true when a document contains all of the specified document attributes. This filter is only applicable to StringListValue
metadata.
Returns true when a document contains any of the specified document attributes. This filter is only applicable to StringListValue
metadata.
Performs a greater than operation on two document attributes. Use with a document attribute of type Integer
or Long
.
Performs a greater or equals than operation on two document attributes. Use with a document attribute of type Integer
or Long
.
Performs a less than operation on two document attributes. Use with a document attribute of type Integer
or Long
.
Performs a less than or equals operation on two document attributes. Use with a document attribute of type Integer
or Long
.
Performs a greater than operation on two document attributes. Use with a document attribute of type Date
or Long
.
Performs a greater or equals than operation on two document attributes. Use with a document attribute of type Date
or Long
.
Performs a less than operation on two document attributes. Use with a document attribute of type Date
or Long
.
Performs a less than or equals operation on two document attributes. Use with a document attribute of type Date
or Long
.
The maximum results shown for a list of groups that are mapped to users before a given ordering or timestamp identifier.
" + "ListGroupsOlderThanOrderingIdRequest$MaxResults": "The maximum number of returned groups that are mapped to users before a given ordering or timestamp identifier.
" } }, "MaxResultsIntegerForListQuerySuggestionsBlockLists": { @@ -1657,14 +1657,14 @@ "NextToken": { "base": null, "refs": { - "ListDataSourceSyncJobsRequest$NextToken": "If the result of the previous request to GetDataSourceSyncJobHistory
was truncated, include the NextToken
to fetch the next set of jobs.
The GetDataSourceSyncJobHistory
operation returns a page of vocabularies at a time. The maximum size of the page is set by the MaxResults
parameter. If there are more jobs in the list than the page size, Amazon Kendra returns the NextPage token. Include the token in the next request to the GetDataSourceSyncJobHistory
operation to return in the next page of jobs.
If the previous response was incomplete (because there is more data to retrieve), Amazon Kendra returns a pagination token in the response. You can use this pagination token to retrieve the next set of jobs.
", + "ListDataSourceSyncJobsResponse$NextToken": "If the response is truncated, Amazon Kendra returns this token that you can use in the subsequent request to retrieve the next set of jobs.
", "ListDataSourcesRequest$NextToken": "If the previous response was incomplete (because there is more data to retrieve), Amazon Kendra returns a pagination token in the response. You can use this pagination token to retrieve the next set of data sources (DataSourceSummaryItems
).
If the response is truncated, Amazon Kendra returns this token that you can use in the subsequent request to retrieve the next set of data sources.
", - "ListFaqsRequest$NextToken": "If the result of the previous request to ListFaqs
was truncated, include the NextToken
to fetch the next set of FAQs.
The ListFaqs
operation returns a page of FAQs at a time. The maximum size of the page is set by the MaxResults
parameter. If there are more jobs in the list than the page size, Amazon Kendra returns the NextPage
token. Include the token in the next request to the ListFaqs
operation to return the next page of FAQs.
The next items in the list of groups that go beyond the maximum.
", - "ListGroupsOlderThanOrderingIdResponse$NextToken": "The next items in the list of groups that go beyond the maximum.
", + "ListFaqsRequest$NextToken": "If the previous response was incomplete (because there is more data to retrieve), Amazon Kendra returns a pagination token in the response. You can use this pagination token to retrieve the next set of FAQs.
", + "ListFaqsResponse$NextToken": "If the response is truncated, Amazon Kendra returns this token that you can use in the subsequent request to retrieve the next set of FAQs.
", + "ListGroupsOlderThanOrderingIdRequest$NextToken": "If the previous response was incomplete (because there is more data to retrieve), Amazon Kendra returns a pagination token in the response. You can use this pagination token to retrieve the next set of groups that are mapped to users before a given ordering or timestamp identifier.
", + "ListGroupsOlderThanOrderingIdResponse$NextToken": "If the response is truncated, Amazon Kendra returns this token that you can use in the subsequent request to retrieve the next set of groups that are mapped to users before a given ordering or timestamp identifier.
", "ListIndicesRequest$NextToken": "If the previous response was incomplete (because there is more data to retrieve), Amazon Kendra returns a pagination token in the response. You can use this pagination token to retrieve the next set of indexes (DataSourceSummaryItems
).
If the response is truncated, Amazon Kendra returns this token that you can use in the subsequent request to retrieve the next set of indexes.
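Every request/response NextToken pair above follows the same loop; a minimal sketch against ListDataSourceSyncJobs, assuming placeholder IDs.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kendra"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := kendra.New(sess)

	var nextToken *string
	for {
		out, err := svc.ListDataSourceSyncJobs(&kendra.ListDataSourceSyncJobsInput{
			Id:        aws.String("0123abcd-example-data-source-id"), // placeholder
			IndexId:   aws.String("0123abcd-example-index-id"),       // placeholder
			NextToken: nextToken,
		})
		if err != nil {
			log.Fatal(err)
		}
		for _, job := range out.History {
			fmt.Println(aws.StringValue(job.ExecutionId), aws.StringValue(job.Status))
		}
		if out.NextToken == nil {
			break // no more pages
		}
		nextToken = out.NextToken // pass the token back, as described above
	}
}
```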
", "ListQuerySuggestionsBlockListsRequest$NextToken": "If the previous response was incomplete (because there is more data to retrieve), Amazon Kendra returns a pagination token in the response. You can use this pagination token to retrieve the next set of block lists (BlockListSummaryItems
).
Shows the current S3 path to your block list text file in your S3 bucket.
Each block word or phrase should be on a separate line in a text file.
For information on the current quota limits for block lists, see Quotas for Amazon Kendra.
", "DescribeThesaurusResponse$SourceS3Path": null, "Document$S3Path": null, - "GroupMembers$S3PathforGroupMembers": "If you have more than 1000 users and/or sub groups for a single group, you need to provide the path to the S3 file that lists your users and sub groups for a group. Your sub groups can contain more than 1000 users, but the list of sub groups that belong to a group (and/or users) must be no more than 1000.
", + "GroupMembers$S3PathforGroupMembers": "If you have more than 1000 users and/or sub groups for a single group, you need to provide the path to the S3 file that lists your users and sub groups for a group. Your sub groups can contain more than 1000 users, but the list of sub groups that belong to a group (and/or users) must be no more than 1000.
You can download this example S3 file that uses the correct format for listing group members. Note, dataSourceId
is optional. The value of type
for a group is always GROUP
and for a user it is always USER
.
The S3 bucket location of a file containing a list of users whose documents should be indexed.
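A minimal sketch of the S3PathforGroupMembers option above for groups with more than 1000 members; the bucket, key, group, and index ID are hypothetical placeholders, and the file must follow the documented membership format.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kendra"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := kendra.New(sess)

	// Point Amazon Kendra at a membership file in S3 instead of listing
	// more than 1000 members inline.
	_, err := svc.PutPrincipalMapping(&kendra.PutPrincipalMappingInput{
		IndexId: aws.String("0123abcd-example-index-id"), // placeholder
		GroupId: aws.String("Engineering"),               // placeholder
		GroupMembers: &kendra.GroupMembers{
			S3PathforGroupMembers: &kendra.S3Path{
				Bucket: aws.String("example-bucket"),          // placeholder
				Key:    aws.String("groups/engineering.json"), // placeholder
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```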
", "SharePointConfiguration$SslCertificateS3Path": null, "UpdateQuerySuggestionsBlockListRequest$SourceS3Path": "The S3 path where your block list text file sits in S3.
If you update your block list and provide the same path to the block list text file in S3, then Amazon Kendra reloads the file to refresh the block list. Amazon Kendra does not automatically refresh your block list. You need to call the UpdateQuerySuggestionsBlockList
API to refresh you block list.
If you update your block list, then Amazon Kendra asynchronously refreshes all query suggestions with the latest content in the S3 file. This means changes might not take effect immediately.
", @@ -2474,7 +2474,7 @@ } }, "ThesaurusSummary": { - "base": "An array of summary information for one or more thesauruses.
", + "base": "An array of summary information for a thesaurus or multiple thesauri.
", "refs": { "ThesaurusSummaryItems$member": null } @@ -2482,7 +2482,7 @@ "ThesaurusSummaryItems": { "base": null, "refs": { - "ListThesauriResponse$ThesaurusSummaryItems": "An array of summary information for one or more thesauruses.
" + "ListThesauriResponse$ThesaurusSummaryItems": "An array of summary information for a thesaurus or multiple thesauri.
" } }, "ThrottlingException": { @@ -2510,7 +2510,7 @@ "DescribeFaqResponse$UpdatedAt": "The date and time that the FAQ was last updated.
", "DescribeIndexResponse$CreatedAt": "The Unix datetime that the index was created.
", "DescribeIndexResponse$UpdatedAt": "The Unix datetime that the index was last updated.
", - "DescribeQuerySuggestionsBlockListResponse$CreatedAt": "Shows the date-time a block list for query suggestions was last created.
", + "DescribeQuerySuggestionsBlockListResponse$CreatedAt": "Shows the date-time a block list for query suggestions was created.
", "DescribeQuerySuggestionsBlockListResponse$UpdatedAt": "Shows the date-time a block list for query suggestions was last updated.
", "DescribeQuerySuggestionsConfigResponse$LastSuggestionsBuildTime": "Shows the date-time query suggestions for an index was last updated.
", "DescribeQuerySuggestionsConfigResponse$LastClearTime": "Shows the date-time query suggestions for an index was last cleared.
After you clear suggestions, Amazon Kendra learns new suggestions based on new queries added to the query log from the time you cleared suggestions. Amazon Kendra only considers re-occurences of a query from the time you cleared suggestions.
", @@ -2589,9 +2589,9 @@ } }, "Urls": { - "base": "Provides the configuration information of the URLs to crawl.
When selecting websites to index, you must adhere to the Amazon Acceptable Use Policy and all other Amazon terms. Remember that you must only use the Amazon Kendra web crawler to index your own webpages, or webpages that you have authorization to index.
", + "base": "Provides the configuration information of the URLs to crawl.
You can only crawl websites that use the secure communication protocol, Hypertext Transfer Protocol Secure (HTTPS). If you receive an error when crawling a website, it could be that the website is blocked from crawling.
When selecting websites to index, you must adhere to the Amazon Acceptable Use Policy and all other Amazon terms. Remember that you must only use the Amazon Kendra web crawler to index your own webpages, or webpages that you have authorization to index.
", "refs": { - "WebCrawlerConfiguration$Urls": "Specifies the seed or starting point URLs of the websites or the sitemap URLs of the websites you want to crawl.
You can include website subdomains. You can list up to 100 seed URLs and up to three sitemap URLs.
When selecting websites to index, you must adhere to the Amazon Acceptable Use Policy and all other Amazon terms. Remember that you must only use the Amazon Kendra web crawler to index your own webpages, or webpages that you have authorization to index.
" + "WebCrawlerConfiguration$Urls": "Specifies the seed or starting point URLs of the websites or the sitemap URLs of the websites you want to crawl.
You can include website subdomains. You can list up to 100 seed URLs and up to three sitemap URLs.
You can only crawl websites that use the secure communication protocol, Hypertext Transfer Protocol Secure (HTTPS). If you receive an error when crawling a website, it could be that the website is blocked from crawling.
When selecting websites to index, you must adhere to the Amazon Acceptable Use Policy and all other Amazon terms. Remember that you must only use the Amazon Kendra web crawler to index your own webpages, or webpages that you have authorization to index.
" } }, "UserAccount": { @@ -2601,17 +2601,31 @@ } }, "UserContext": { - "base": "Provides information about the user context for a Amazon Kendra index.
This is used for filtering search results for different users based on their access to documents.
You provide one of the following:
User token
User ID, the groups the user belongs to, and the data sources the groups can access
If you provide both, an exception is thrown.
", + "base": "Provides information about the user context for an Amazon Kendra index.
This is used for filtering search results for different users based on their access to documents.
You provide one of the following:
User token
User ID, the groups the user belongs to, and any data sources the groups can access.
If you provide both, an exception is thrown.
", "refs": { - "QueryRequest$UserContext": "The user context token.
" + "QueryRequest$UserContext": "The user context token or user and group information.
" } }, "UserContextPolicy": { "base": null, "refs": { - "CreateIndexRequest$UserContextPolicy": "The user context policy.
All indexed content is searchable and displayable for all users. If there is an access control list, it is ignored. You can filter on user and group attributes.
Enables SSO and token-based user access control. All documents with no access control and all documents accessible to the user will be searchable and displayable.
The user context policy.
All indexed content is searchable and displayable for all users. If you want to filter search results on user context, you can use the attribute filters of _user_id
and _group_ids
or you can provide user and group information in UserContext
.
Enables token-based user access control to filter search results on user context. All documents with no access control and all documents accessible to the user will be searchable and displayable.
The user context policy for the Amazon Kendra index.
", - "UpdateIndexRequest$UserContextPolicy": "The user user token context policy.
" + "UpdateIndexRequest$UserContextPolicy": "The user context policy.
" + } + }, + "UserGroupResolutionConfiguration": { + "base": "Provides the configuration information to fetch access levels of groups and users from an AWS Single Sign-On identity source. This is useful for setting up user context filtering, where Amazon Kendra filters search results for different users based on their group's access to documents. You can also map your users to their groups for user context filtering using the PutPrincipalMapping operation.
To set up an AWS SSO identity source in the console to use with Amazon Kendra, see Getting started with an AWS SSO identity source. You must also grant the required permissions to use AWS SSO with Amazon Kendra. For more information, see IAM roles for AWS Single Sign-On.
", + "refs": { + "CreateIndexRequest$UserGroupResolutionConfiguration": "Enables fetching access levels of groups and users from an AWS Single Sign-On identity source. To configure this, see UserGroupResolutionConfiguration.
", + "DescribeIndexResponse$UserGroupResolutionConfiguration": "Shows whether you have enabled the configuration for fetching access levels of groups and users from an AWS Single Sign-On identity source.
", + "UpdateIndexRequest$UserGroupResolutionConfiguration": "Enables fetching access levels of groups and users from an AWS Single Sign-On identity source. To configure this, see UserGroupResolutionConfiguration.
" + } + }, + "UserGroupResolutionMode": { + "base": null, + "refs": { + "UserGroupResolutionConfiguration$UserGroupResolutionMode": "The identity store provider (mode) you want to use to fetch access levels of groups and users. AWS Single Sign-On is currently the only available mode. Your users and groups must exist in an AWS SSO identity source in order to use this mode.
" } }, "UserId": { diff --git a/models/apis/sagemaker/2017-07-24/api-2.json b/models/apis/sagemaker/2017-07-24/api-2.json index e2138151456..024d6f152ef 100644 --- a/models/apis/sagemaker/2017-07-24/api-2.json +++ b/models/apis/sagemaker/2017-07-24/api-2.json @@ -14092,7 +14092,8 @@ "type":"string", "enum":[ "Pipe", - "File" + "File", + "FastFile" ] }, "TrainingInstanceCount":{ diff --git a/models/apis/sagemaker/2017-07-24/docs-2.json b/models/apis/sagemaker/2017-07-24/docs-2.json index 20ec08380d8..f351485d731 100644 --- a/models/apis/sagemaker/2017-07-24/docs-2.json +++ b/models/apis/sagemaker/2017-07-24/docs-2.json @@ -18,7 +18,7 @@ "CreateDeviceFleet": "Creates a device fleet.
", "CreateDomain": "Creates a Domain
used by Amazon SageMaker Studio. A domain consists of an associated Amazon Elastic File System (EFS) volume, a list of authorized users, and a variety of security, application, policy, and Amazon Virtual Private Cloud (VPC) configurations. An Amazon Web Services account is limited to one domain per region. Users within a domain can share notebook files and other artifacts with each other.
EFS storage
When a domain is created, an EFS volume is created for use by all of the users within the domain. Each user receives a private home directory within the EFS volume for notebooks, Git repositories, and data files.
SageMaker uses the Amazon Web Services Key Management Service (Amazon Web Services KMS) to encrypt the EFS volume attached to the domain with an Amazon Web Services managed key by default. For more control, you can specify a customer managed key. For more information, see Protect Data at Rest Using Encryption.
VPC configuration
All SageMaker Studio traffic between the domain and the EFS volume is through the specified VPC and subnets. For other Studio traffic, you can specify the AppNetworkAccessType
parameter. AppNetworkAccessType
corresponds to the network access type that you choose when you onboard to Studio. The following options are available:
PublicInternetOnly
- Non-EFS traffic goes through a VPC managed by Amazon SageMaker, which allows internet access. This is the default value.
VpcOnly
- All Studio traffic is through the specified VPC and subnets. Internet access is disabled by default. To allow internet access, you must specify a NAT gateway.
When internet access is disabled, you won't be able to run a Studio notebook or to train or host models unless your VPC has an interface endpoint to the SageMaker API and runtime or a NAT gateway and your security groups allow outbound connections.
NFS traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules in order to launch a SageMaker Studio app successfully.
For more information, see Connect SageMaker Studio Notebooks to Resources in a VPC.
", "CreateEdgePackagingJob": "Starts a SageMaker Edge Manager model packaging job. Edge Manager will use the model artifacts from the Amazon Simple Storage Service bucket that you specify. After the model has been packaged, Amazon SageMaker saves the resulting artifacts to an S3 bucket that you specify.
", - "CreateEndpoint": "Creates an endpoint using the endpoint configuration specified in the request. Amazon SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.
Use this API to deploy models using Amazon SageMaker hosting services.
For an example that calls this method when deploying a model to Amazon SageMaker hosting services, see the Create Endpoint example notebook.
You must not delete an EndpointConfig
that is in use by an endpoint that is live or while the UpdateEndpoint
or CreateEndpoint
operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig
.
The endpoint name must be unique within an Amazon Web Services Region in your Amazon Web Services account.
When it receives the request, Amazon SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.
When you call CreateEndpoint, a load call is made to DynamoDB to verify that your endpoint configuration exists. When you read data from a DynamoDB table supporting Eventually Consistent Reads
, the response might not reflect the results of a recently completed write operation. The response might include some stale data. If the dependent entities are not yet in DynamoDB, this causes a validation error. If you repeat your read request after a short time, the response should return the latest data. So retry logic is recommended to handle these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.
When Amazon SageMaker receives the request, it sets the endpoint status to Creating
. After it creates the endpoint, it sets the status to InService
. Amazon SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.
If any of the models hosted at this endpoint get model data from an Amazon S3 location, Amazon SageMaker uses Amazon Web Services Security Token Service to download model artifacts from the S3 path you provided. Amazon Web Services STS is activated in your IAM user account by default. If you previously deactivated Amazon Web Services STS for a region, you need to reactivate Amazon Web Services STS for that region. For more information, see Activating and Deactivating Amazon Web Services STS in an Amazon Web Services Region in the Amazon Web Services Identity and Access Management User Guide.
To add the IAM role policies for using this API operation, go to the IAM console, and choose Roles in the left navigation pane. Search the IAM role that you want to grant access to use the CreateEndpoint and CreateEndpointConfig API operations, add the following policies to the role.
Option 1: For a full Amazon SageMaker access, search and attach the AmazonSageMakerFullAccess
policy.
Option 2: For granting a limited access to an IAM role, paste the following Action elements manually into the JSON file of the IAM role:
\"Action\": [\"sagemaker:CreateEndpoint\", \"sagemaker:CreateEndpointConfig\"]
\"Resource\": [
\"arn:aws:sagemaker:region:account-id:endpoint/endpointName\"
\"arn:aws:sagemaker:region:account-id:endpoint-config/endpointConfigName\"
]
For more information, see Amazon SageMaker API Permissions: Actions, Permissions, and Resources Reference.
Creates an endpoint using the endpoint configuration specified in the request. Amazon SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.
Use this API to deploy models using Amazon SageMaker hosting services.
For an example that calls this method when deploying a model to Amazon SageMaker hosting services, see the Create Endpoint example notebook.
You must not delete an EndpointConfig
that is in use by an endpoint that is live or while the UpdateEndpoint
or CreateEndpoint
operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig
.
The endpoint name must be unique within an Amazon Web Services Region in your Amazon Web Services account.
When it receives the request, Amazon SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.
When you call CreateEndpoint, a load call is made to DynamoDB to verify that your endpoint configuration exists. When you read data from a DynamoDB table supporting Eventually Consistent Reads
, the response might not reflect the results of a recently completed write operation. The response might include some stale data. If the dependent entities are not yet in DynamoDB, this causes a validation error. If you repeat your read request after a short time, the response should return the latest data. So retry logic is recommended to handle these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.
When Amazon SageMaker receives the request, it sets the endpoint status to Creating
. After it creates the endpoint, it sets the status to InService
. Amazon SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.
If any of the models hosted at this endpoint get model data from an Amazon S3 location, Amazon SageMaker uses Amazon Web Services Security Token Service to download model artifacts from the S3 path you provided. Amazon Web Services STS is activated in your IAM user account by default. If you previously deactivated Amazon Web Services STS for a region, you need to reactivate Amazon Web Services STS for that region. For more information, see Activating and Deactivating Amazon Web Services STS in an Amazon Web Services Region in the Amazon Web Services Identity and Access Management User Guide.
To add the IAM role policies for using this API operation, go to the IAM console, and choose Roles in the left navigation pane. Search the IAM role that you want to grant access to use the CreateEndpoint and CreateEndpointConfig API operations, add the following policies to the role.
Option 1: For a full SageMaker access, search and attach the AmazonSageMakerFullAccess
policy.
Option 2: For granting a limited access to an IAM role, paste the following Action elements manually into the JSON file of the IAM role:
\"Action\": [\"sagemaker:CreateEndpoint\", \"sagemaker:CreateEndpointConfig\"]
\"Resource\": [
\"arn:aws:sagemaker:region:account-id:endpoint/endpointName\"
\"arn:aws:sagemaker:region:account-id:endpoint-config/endpointConfigName\"
]
For more information, see SageMaker API Permissions: Actions, Permissions, and Resources Reference.
Creates an endpoint configuration that Amazon SageMaker hosting services uses to deploy models. In the configuration, you identify one or more models, created using the CreateModel
API, to deploy and the resources that you want Amazon SageMaker to provision. Then you call the CreateEndpoint API.
Use this API if you want to use Amazon SageMaker hosting services to deploy models into production.
In the request, you define a ProductionVariant
, for each model that you want to deploy. Each ProductionVariant
parameter also describes the resources that you want Amazon SageMaker to provision. This includes the number and type of ML compute instances to deploy.
If you are hosting multiple models, you also assign a VariantWeight
to specify how much traffic you want to allocate to each model. For example, suppose that you want to host two models, A and B, and you assign traffic weight 2 for model A and 1 for model B. Amazon SageMaker distributes two-thirds of the traffic to Model A, and one-third to model B.
When you call CreateEndpoint, a load call is made to DynamoDB to verify that your endpoint configuration exists. When you read data from a DynamoDB table supporting Eventually Consistent Reads
, the response might not reflect the results of a recently completed write operation. The response might include some stale data. If the dependent entities are not yet in DynamoDB, this causes a validation error. If you repeat your read request after a short time, the response should return the latest data. So retry logic is recommended to handle these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.
Creates a SageMaker experiment. An experiment is a collection of trials that are observed, compared and evaluated as a group. A trial is a set of steps, called trial components, that produce a machine learning model.
The goal of an experiment is to determine the components that produce the best model. Multiple trials are performed, each one isolating and measuring the impact of a change to one or more inputs, while keeping the remaining inputs constant.
When you use SageMaker Studio or the SageMaker Python SDK, all experiments, trials, and trial components are automatically tracked, logged, and indexed. When you use the Amazon Web Services SDK for Python (Boto), you must use the logging APIs provided by the SDK.
You can add tags to experiments, trials, trial components and then use the Search API to search for the tags.
To add a description to an experiment, specify the optional Description
parameter. To add a description later, or to change the description, call the UpdateExperiment API.
To get a list of all your experiments, call the ListExperiments API. To view an experiment's properties, call the DescribeExperiment API. To get a list of all the trials associated with an experiment, call the ListTrials API. To create a trial, call the CreateTrial API.
", "CreateFeatureGroup": "Create a new FeatureGroup
. A FeatureGroup
is a group of Features
defined in the FeatureStore
to describe a Record
.
The FeatureGroup
defines the schema and features contained in the FeatureGroup. A FeatureGroup
definition is composed of a list of Features
, a RecordIdentifierFeatureName
, an EventTimeFeatureName
and configurations for its OnlineStore
and OfflineStore
. Check Amazon Web Services service quotas to see the FeatureGroup
s quota for your Amazon Web Services account.
You must include at least one of OnlineStoreConfig
and OfflineStoreConfig
to create a FeatureGroup
.
The billable time in seconds. Billable time refers to the absolute wall-clock time.
Multiply BillableTimeInSeconds
by the number of instances (InstanceCount
) in your training cluster to get the total compute time Amazon SageMaker will bill you if you run distributed training. The formula is as follows: BillableTimeInSeconds * InstanceCount
.
You can calculate the savings from using managed spot training using the formula (1 - BillableTimeInSeconds / TrainingTimeInSeconds) * 100
. For example, if BillableTimeInSeconds
is 100 and TrainingTimeInSeconds
is 500, the savings is 80%.
The billable time in seconds. Billable time refers to the absolute wall-clock time.
Multiply BillableTimeInSeconds
by the number of instances (InstanceCount
) in your training cluster to get the total compute time SageMaker will bill you if you run distributed training. The formula is as follows: BillableTimeInSeconds * InstanceCount
.
You can calculate the savings from using managed spot training using the formula (1 - BillableTimeInSeconds / TrainingTimeInSeconds) * 100
. For example, if BillableTimeInSeconds
is 100 and TrainingTimeInSeconds
is 500, the savings is 80%.
The billable time in seconds.
" } }, @@ -4905,7 +4905,7 @@ "JoinSource": { "base": null, "refs": { - "DataProcessing$JoinSource": "Specifies the source of the data to join with the transformed data. The valid values are None
and Input
. The default value is None
, which specifies not to join the input with the transformed data. If you want the batch transform job to join the original input data with the transformed data, set JoinSource
to Input
. You can specify OutputFilter
as an additional filter to select a portion of the joined dataset and store it in the output file.
For JSON or JSONLines objects, such as a JSON array, Amazon SageMaker adds the transformed data to the input JSON object in an attribute called SageMakerOutput
. The joined result for JSON must be a key-value pair object. If the input is not a key-value pair object, Amazon SageMaker creates a new JSON file. In the new JSON file, and the input data is stored under the SageMakerInput
key and the results are stored in SageMakerOutput
.
For CSV data, Amazon SageMaker takes each row as a JSON array and joins the transformed data with the input by appending each transformed row to the end of the input. The joined data has the original input data followed by the transformed data and the output is a CSV file.
For information on how joining in applied, see Workflow for Associating Inferences with Input Records.
" + "DataProcessing$JoinSource": "Specifies the source of the data to join with the transformed data. The valid values are None
and Input
. The default value is None
, which specifies not to join the input with the transformed data. If you want the batch transform job to join the original input data with the transformed data, set JoinSource
to Input
. You can specify OutputFilter
as an additional filter to select a portion of the joined dataset and store it in the output file.
For JSON or JSONLines objects, such as a JSON array, SageMaker adds the transformed data to the input JSON object in an attribute called SageMakerOutput
. The joined result for JSON must be a key-value pair object. If the input is not a key-value pair object, SageMaker creates a new JSON file. In the new JSON file, the input data is stored under the SageMakerInput
key and the results are stored in SageMakerOutput
.
For CSV data, SageMaker takes each row as a JSON array and joins the transformed data with the input by appending each transformed row to the end of the input. The joined data has the original input data followed by the transformed data and the output is a CSV file.
For information on how joining is applied, see Workflow for Associating Inferences with Input Records.
" } }, "JsonContentType": { @@ -5851,7 +5851,7 @@ "MaxAutoMLJobRuntimeInSeconds": { "base": null, "refs": { - "AutoMLJobCompletionCriteria$MaxAutoMLJobRuntimeInSeconds": "The maximum runtime, in seconds, an AutoML job has to complete.
" + "AutoMLJobCompletionCriteria$MaxAutoMLJobRuntimeInSeconds": "The maximum runtime, in seconds, an AutoML job has to complete.
If an AutoML job exceeds the maximum runtime, the job is stopped automatically and its processing is ended gracefully. The AutoML job identifies the best model whose training was completed and marks it as the best-performing model. Any unfinished steps of the job, such as automatic one-click Autopilot model deployment, will not be completed.
" } }, "MaxCandidates": { @@ -5976,7 +5976,7 @@ "MaxRuntimePerTrainingJobInSeconds": { "base": null, "refs": { - "AutoMLJobCompletionCriteria$MaxRuntimePerTrainingJobInSeconds": "The maximum time, in seconds, a training job is allowed to run as part of an AutoML job.
" + "AutoMLJobCompletionCriteria$MaxRuntimePerTrainingJobInSeconds": "The maximum time, in seconds, that each training job is allowed to run as part of a hyperparameter tuning job. For more information, see the used by the action.
" } }, "MaxWaitTimeInSeconds": { @@ -7220,7 +7220,7 @@ "OfflineStoreConfig": { "base": "The configuration of an OfflineStore
.
Provide an OfflineStoreConfig
in a request to CreateFeatureGroup
to create an OfflineStore
.
To encrypt an OfflineStore
using at rest data encryption, specify Amazon Web Services Key Management Service (KMS) key ID, or KMSKeyId
, in S3StorageConfig
.
Use this to configure an OfflineFeatureStore
. This parameter allows you to specify:
The Amazon Simple Storage Service (Amazon S3) location of an OfflineStore
.
A configuration for an Amazon Web Services Glue or Amazon Web Services Hive data cataolgue.
An KMS encryption key to encrypt the Amazon S3 location used for OfflineStore
.
To learn more about this parameter, see OfflineStoreConfig.
", + "CreateFeatureGroupRequest$OfflineStoreConfig": "Use this to configure an OfflineFeatureStore
. This parameter allows you to specify:
The Amazon Simple Storage Service (Amazon S3) location of an OfflineStore
.
A configuration for an Amazon Web Services Glue or Amazon Web Services Hive data catalog.
A KMS encryption key to encrypt the Amazon S3 location used for OfflineStore
. If a KMS encryption key is not specified, by default we encrypt all data at rest using an Amazon Web Services KMS key. By defining your bucket-level key for SSE, you can reduce Amazon Web Services KMS request costs by up to 99 percent.
To learn more about this parameter, see OfflineStoreConfig.
", "DescribeFeatureGroupResponse$OfflineStoreConfig": "The configuration of the OfflineStore
, including the S3 location of the OfflineStore
, Amazon Web Services Glue or Amazon Web Services Hive data catalogue configurations, and the security configuration.
Whether the Pipe
or File
is used as the input mode for transfering data for the monitoring job. Pipe
mode is recommended for large datasets. File
mode is useful for small files that fit in memory. Defaults to File
.
Whether the Pipe
or File
is used as the input mode for transferring data for the monitoring job. Pipe
mode is recommended for large datasets. File
mode is useful for small files that fit in memory. Defaults to File
.
Whether to use File
or Pipe
input mode. In File mode, Amazon SageMaker copies the data from the input source onto the local ML storage volume before starting your processing container. This is the most commonly used input mode. In Pipe
mode, Amazon SageMaker streams input data from the source directly to your processing container into named pipes without using the ML storage volume.
Specifies a limit to how long a model training job, model compilation job, or hyperparameter tuning job can run. It also specifies how long a managed Spot training job has to complete. When the job reaches the time limit, Amazon SageMaker ends the training or compilation job. Use this API to cap model training costs.
To stop a training job, Amazon SageMaker sends the algorithm the SIGTERM
signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.
The training algorithms provided by Amazon SageMaker automatically save the intermediate results of a model training job when possible. This attempt to save artifacts is only a best-effort case, as the model might not be in a state from which it can be saved. For example, if training has just started, the model might not be ready to save. When saved, this intermediate data is a valid model artifact. You can use it to create a model with CreateModel
.
The Neural Topic Model (NTM) currently does not support saving intermediate model artifacts. When training NTMs, make sure that the maximum runtime is sufficient for the training job to complete.
Specifies a limit to how long a model training job or model compilation job can run. It also specifies how long a managed spot training job has to complete. When the job reaches the time limit, Amazon SageMaker ends the training or compilation job. Use this API to cap model training costs.
To stop a training job, Amazon SageMaker sends the algorithm the SIGTERM
signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.
The training algorithms provided by Amazon SageMaker automatically save the intermediate results of a model training job when possible. This attempt to save artifacts is only a best-effort case, as the model might not be in a state from which it can be saved. For example, if training has just started, the model might not be ready to save. When saved, this intermediate data is a valid model artifact. You can use it to create a model with CreateModel
.
The Neural Topic Model (NTM) currently does not support saving intermediate model artifacts. When training NTMs, make sure that the maximum runtime is sufficient for the training job to complete.
Specifies a limit to how long a model compilation job can run. When the job reaches the time limit, Amazon SageMaker ends the compilation job. Use this API to cap model training costs.
", "CreateTrainingJobRequest$StoppingCondition": "Specifies a limit to how long a model training job can run. It also specifies how long a managed Spot training job has to complete. When the job reaches the time limit, Amazon SageMaker ends the training job. Use this API to cap model training costs.
To stop a job, Amazon SageMaker sends the algorithm the SIGTERM
signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.
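The StoppingCondition shape itself is small. A minimal sketch with the v1 `sagemaker` package; the values are placeholders, and for managed Spot training MaxWaitTimeInSeconds must be at least as large as MaxRuntimeInSeconds:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	// Sketch only: values are placeholders.
	stop := &sagemaker.StoppingCondition{
		MaxRuntimeInSeconds:  aws.Int64(3600), // cap on training time, and therefore cost
		MaxWaitTimeInSeconds: aws.Int64(7200), // Spot wait time plus training time
	}
	fmt.Println(stop)
}
```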
A tag object that consists of a key and an optional value, used to manage metadata for Amazon SageMaker Amazon Web Services resources.
You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to Amazon SageMaker resources, see AddTags.
For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources. For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy.
", + "base": "A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources.
You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags.
For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources. For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy.
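A minimal sketch of attaching such a tag through the v1 `sagemaker` package; the resource ARN and key/value pair are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := sagemaker.New(sess)

	// Sketch only: the resource ARN and tag values are placeholders.
	_, err := svc.AddTags(&sagemaker.AddTagsInput{
		ResourceArn: aws.String("arn:aws:sagemaker:us-east-1:123456789012:training-job/example"),
		Tags: []*sagemaker.Tag{
			{Key: aws.String("team"), Value: aws.String("ml-platform")},
		},
	})
	if err != nil {
		fmt.Println("AddTags failed:", err)
	}
}
```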
", "refs": { "TagList$member": null } @@ -9504,7 +9504,7 @@ "DescribeAppImageConfigResponse$CreationTime": "When the AppImageConfig was created.
", "DescribeAppImageConfigResponse$LastModifiedTime": "When the AppImageConfig was last modified.
", "DescribeAppResponse$LastHealthCheckTimestamp": "The timestamp of the last health check.
", - "DescribeAppResponse$LastUserActivityTimestamp": "The timestamp of the last user's activity.
", + "DescribeAppResponse$LastUserActivityTimestamp": "The timestamp of the last user's activity. LastUserActivityTimestamp
is also updated when SageMaker performs health checks without user activity. As a result, this value is set to the same value as LastHealthCheckTimestamp
.
When the artifact was created.
", "DescribeArtifactResponse$LastModifiedTime": "When the artifact was last modified.
", "DescribeAutoMLJobResponse$CreationTime": "Returns the creation time of the AutoML job.
", @@ -9811,13 +9811,13 @@ } }, "TrainingInputMode": { - "base": null, + "base": "The training input mode that the algorithm supports. For more information about input modes, see Algorithms.
Pipe mode
If an algorithm supports Pipe
mode, Amazon SageMaker streams data directly from Amazon S3 to the container.
File mode
If an algorithm supports File
mode, SageMaker downloads the training data from S3 to the provisioned ML storage volume, and mounts the directory to the Docker volume for the training container.
You must provision the ML storage volume with sufficient capacity to accommodate the data downloaded from S3. In addition to the training data, the ML storage volume also stores the output model. The algorithm container uses the ML storage volume to also store intermediate information, if any.
For distributed algorithms, training data is distributed uniformly. Your training duration is predictable if the input data object sizes are approximately the same. SageMaker does not split the files any further for model training. If the object sizes are skewed, training won't be optimal, because the data distribution is also skewed: one host in the training cluster is overloaded and becomes a bottleneck in training.
FastFile mode
If an algorithm supports FastFile
mode, SageMaker streams data directly from S3 to the container with no code changes, and provides file system access to the data. Users can author their training script to interact with these files as if they were stored on disk.
FastFile
mode works best when the data is read sequentially. Augmented manifest files aren't supported. The startup time is lower when there are fewer files in the S3 bucket provided.
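Since this release adds the FastFile value (per the changelog entry for sagemaker), a minimal sketch of selecting it on an AlgorithmSpecification follows; the training image URI is a placeholder:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	// Sketch only: the training image URI is a placeholder.
	spec := &sagemaker.AlgorithmSpecification{
		TrainingImage: aws.String("123456789012.dkr.ecr.us-east-1.amazonaws.com/example:latest"),
		// FastFile streams from S3 while still presenting the data as local files.
		TrainingInputMode: aws.String(sagemaker.TrainingInputModeFastFile),
	}
	fmt.Println(spec)
}
```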
The input mode that the algorithm supports. For the input modes that Amazon SageMaker algorithms support, see Algorithms. If an algorithm supports the File
input mode, Amazon SageMaker downloads the training data from S3 to the provisioned ML storage volume, and mounts the directory to the Docker volume for the training container. If an algorithm supports the Pipe
input mode, Amazon SageMaker streams data directly from S3 to the container.
In File mode, make sure you provision the ML storage volume with sufficient capacity to accommodate the data downloaded from S3. In addition to the training data, the ML storage volume also stores the output model. The algorithm container also uses the ML storage volume to store intermediate information, if any.
For distributed algorithms using File mode, training data is distributed uniformly, and your training duration is predictable if the input data object sizes are approximately the same. Amazon SageMaker does not split the files any further for model training. If the object sizes are skewed, training won't be optimal, because the data distribution is also skewed: one host in the training cluster is overloaded and becomes a bottleneck in training.
", + "AlgorithmSpecification$TrainingInputMode": null, "Channel$InputMode": "(Optional) The input mode to use for the data channel in a training job. If you don't set a value for InputMode
, Amazon SageMaker uses the value set for TrainingInputMode
. Use this parameter to override the TrainingInputMode
setting in an AlgorithmSpecification request when you have a channel that needs a different input mode from the training job's general setting. To download the data from Amazon Simple Storage Service (Amazon S3) to the provisioned ML storage volume, and mount the directory to a Docker volume, use File
input mode. To stream data directly from Amazon S3 to the container, choose Pipe
input mode.
To use a model for incremental training, choose File
input mode.
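A minimal sketch of that per-channel override with the v1 `sagemaker` package; the channel name and S3 URI are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	// Sketch only: channel name and S3 URI are placeholders.
	ch := &sagemaker.Channel{
		ChannelName: aws.String("train"),
		// Overrides AlgorithmSpecification.TrainingInputMode for this channel only.
		InputMode: aws.String(sagemaker.TrainingInputModePipe),
		DataSource: &sagemaker.DataSource{
			S3DataSource: &sagemaker.S3DataSource{
				S3DataType: aws.String(sagemaker.S3DataTypeS3Prefix),
				S3Uri:      aws.String("s3://example-bucket/train/"),
			},
		},
	}
	fmt.Println(ch)
}
```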
The input mode that the algorithm supports: File or Pipe. In File input mode, Amazon SageMaker downloads the training data from Amazon S3 to the storage volume that is attached to the training instance and mounts the directory to the Docker volume for the training container. In Pipe input mode, Amazon SageMaker streams data directly from Amazon S3 to the container.
If you specify File mode, make sure that you provision the storage volume that is attached to the training instance with enough capacity to accommodate the training data downloaded from Amazon S3, the model artifacts, and intermediate information.
For more information about input modes, see Algorithms.
", + "HyperParameterAlgorithmSpecification$TrainingInputMode": null, "InputModes$member": null, - "TrainingJobDefinition$TrainingInputMode": "The input mode used by the algorithm for the training job. For the input modes that Amazon SageMaker algorithms support, see Algorithms.
If an algorithm supports the File
input mode, Amazon SageMaker downloads the training data from S3 to the provisioned ML storage volume, and mounts the directory to the Docker volume for the training container. If an algorithm supports the Pipe
input mode, Amazon SageMaker streams data directly from S3 to the container.