diff --git a/.changes/1.35.25.json b/.changes/1.35.25.json new file mode 100644 index 0000000000..26891b23a5 --- /dev/null +++ b/.changes/1.35.25.json @@ -0,0 +1,42 @@ +[ + { + "category": "``apigateway``", + "description": "Documentation updates for Amazon API Gateway", + "type": "api-change" + }, + { + "category": "``athena``", + "description": "List/Get/Update/Delete/CreateDataCatalog now integrate with AWS Glue connections. Users can create a Glue connection through Athena or use a Glue connection to define their Athena federated parameters.", + "type": "api-change" + }, + { + "category": "``bedrock-agent``", + "description": "Amazon Bedrock Prompt Flows and Prompt Management now support using inference profiles to increase throughput and improve resilience.", + "type": "api-change" + }, + { + "category": "``ec2``", + "description": "Amazon EC2 G6e instances powered by NVIDIA L40S Tensor Core GPUs are the most cost-efficient GPU instances for deploying generative AI models and the highest-performance GPU instances for spatial computing workloads.", + "type": "api-change" + }, + { + "category": "``emr-serverless``", + "description": "This release adds support for job concurrency and queuing configuration at the application level.", + "type": "api-change" + }, + { + "category": "``glue``", + "description": "Added AthenaProperties parameter to Glue Connections, allowing Athena to store service-specific properties on Glue Connections.", + "type": "api-change" + }, + { + "category": "``rds``", + "description": "Support ComputeRedundancy parameter in ModifyDBShardGroup API. Add DBShardGroupArn in DBShardGroup API response. Remove InvalidMaxAcuFault from CreateDBShardGroup and ModifyDBShardGroup API. Both APIs will throw InvalidParameterValueException for an invalid ACU configuration.", + "type": "api-change" + }, + { + "category": "``resource-explorer-2``", + "description": "AWS Resource Explorer released the ListResources feature, which allows customers to list all indexed AWS resources within a view.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 3393778774..2b70d94b67 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,19 @@ CHANGELOG ========= +1.35.25 ======= + +* api-change:``apigateway``: Documentation updates for Amazon API Gateway +* api-change:``athena``: List/Get/Update/Delete/CreateDataCatalog now integrate with AWS Glue connections. Users can create a Glue connection through Athena or use a Glue connection to define their Athena federated parameters. +* api-change:``bedrock-agent``: Amazon Bedrock Prompt Flows and Prompt Management now support using inference profiles to increase throughput and improve resilience. +* api-change:``ec2``: Amazon EC2 G6e instances powered by NVIDIA L40S Tensor Core GPUs are the most cost-efficient GPU instances for deploying generative AI models and the highest-performance GPU instances for spatial computing workloads. +* api-change:``emr-serverless``: This release adds support for job concurrency and queuing configuration at the application level. +* api-change:``glue``: Added AthenaProperties parameter to Glue Connections, allowing Athena to store service-specific properties on Glue Connections. +* api-change:``rds``: Support ComputeRedundancy parameter in ModifyDBShardGroup API. Add DBShardGroupArn in DBShardGroup API response. Remove InvalidMaxAcuFault from CreateDBShardGroup and ModifyDBShardGroup API. Both APIs will throw InvalidParameterValueException for an invalid ACU configuration.
+* api-change:``resource-explorer-2``: AWS Resource Explorer released the ListResources feature, which allows customers to list all indexed AWS resources within a view. + + 1.35.24 ======= diff --git a/botocore/__init__.py b/botocore/__init__.py index 4fa5c8d9da..d9cc18b45e 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.35.24' +__version__ = '1.35.25' class NullHandler(logging.Handler): diff --git a/botocore/data/apigateway/2015-07-09/service-2.json b/botocore/data/apigateway/2015-07-09/service-2.json index a86e1c4745..5d932aa169 100644 --- a/botocore/data/apigateway/2015-07-09/service-2.json +++ b/botocore/data/apigateway/2015-07-09/service-2.json @@ -3637,7 +3637,7 @@ }, "certificateUploadDate":{ "shape":"Timestamp", - "documentation":"
The timestamp when the certificate that was used by the edge-optimized endpoint for this domain name was uploaded.
" + "documentation":"The timestamp when the certificate that was used by edge-optimized endpoint for this domain name was uploaded. API Gateway doesn't change this value if you update the certificate.
" }, "regionalDomainName":{ "shape":"String", diff --git a/botocore/data/athena/2017-05-18/service-2.json b/botocore/data/athena/2017-05-18/service-2.json index 66375fe17b..383a3814f5 100644 --- a/botocore/data/athena/2017-05-18/service-2.json +++ b/botocore/data/athena/2017-05-18/service-2.json @@ -1538,6 +1538,41 @@ "min":0, "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" }, + "ConnectionType":{ + "type":"string", + "enum":[ + "DYNAMODB", + "MYSQL", + "POSTGRESQL", + "REDSHIFT", + "ORACLE", + "SYNAPSE", + "SQLSERVER", + "DB2", + "OPENSEARCH", + "BIGQUERY", + "GOOGLECLOUDSTORAGE", + "HBASE", + "DOCUMENTDB", + "MSK", + "NEPTUNE", + "CMDB", + "TPCDS", + "REDIS", + "CLOUDWATCH", + "TIMESTREAM", + "SAPHANA", + "SNOWFLAKE", + "TERADATA", + "VERTICA", + "CLOUDERAIMPALA", + "CLOUDERAHIVE", + "HORTONWORKSHIVE", + "DATALAKEGEN2", + "DB2AS400", + "CLOUDWATCHMETRICS" + ] + }, "CoordinatorDpuSize":{ "type":"integer", "box":true, @@ -1583,7 +1618,7 @@ }, "Type":{ "shape":"DataCatalogType", - "documentation":"The type of data catalog to create: LAMBDA
for a federated catalog, HIVE
for an external Hive metastore, or GLUE
for a Glue Data Catalog.
The type of data catalog to create: LAMBDA
for a federated catalog, GLUE
for a Glue Data Catalog, and HIVE
for an external Apache Hive metastore. FEDERATED
is a federated catalog for which Athena creates the connection and the Lambda function for you based on the parameters that you pass.
Specifies the Lambda function or functions to use for creating the data catalog. This is a mapping whose values depend on the catalog type.
For the HIVE
data catalog type, use the following syntax. The metadata-function
parameter is required. The sdk-version
parameter is optional and defaults to the currently supported version.
metadata-function=lambda_arn, sdk-version=version_number
For the LAMBDA
data catalog type, use one of the following sets of required parameters, but not both.
If you have one Lambda function that processes metadata and another for reading the actual data, use the following syntax. Both parameters are required.
metadata-function=lambda_arn, record-function=lambda_arn
If you have a composite Lambda function that processes both metadata and data, use the following syntax to specify your Lambda function.
function=lambda_arn
The GLUE
type requires a catalog ID parameter. The catalog_id
is the account ID of the Amazon Web Services account to which the Glue Data Catalog belongs.
catalog-id=catalog_id
The GLUE
data catalog type also applies to the default AwsDataCatalog
that already exists in your account; you can have only one, and you cannot modify it.
Specifies the Lambda function or functions to use for creating the data catalog. This is a mapping whose values depend on the catalog type.
For the HIVE
data catalog type, use the following syntax. The metadata-function
parameter is required. The sdk-version
parameter is optional and defaults to the currently supported version.
metadata-function=lambda_arn, sdk-version=version_number
For the LAMBDA
data catalog type, use one of the following sets of required parameters, but not both.
If you have one Lambda function that processes metadata and another for reading the actual data, use the following syntax. Both parameters are required.
metadata-function=lambda_arn, record-function=lambda_arn
If you have a composite Lambda function that processes both metadata and data, use the following syntax to specify your Lambda function.
function=lambda_arn
The GLUE
type requires a catalog ID parameter. The catalog_id
is the account ID of the Amazon Web Services account to which the Glue Data Catalog belongs.
catalog-id=catalog_id
The GLUE
data catalog type also applies to the default AwsDataCatalog
that already exists in your account; you can have only one, and you cannot modify it.
The FEDERATED
data catalog type uses one of the following parameters, but not both. Use connection-arn
for an existing Glue connection. Use connection-type
and connection-properties
to specify the configuration settings for a new connection.
connection-arn:<glue_connection_arn_to_reuse>
lambda-role-arn
(optional): The execution role to use for the Lambda function. If not provided, one is created.
connection-type:MYSQL|REDSHIFT|...., connection-properties:\"<json_string>\"
For <json_string>
, use escaped JSON text, as in the following example.
\"{\\\"spill_bucket\\\":\\\"my_spill\\\",\\\"spill_prefix\\\":\\\"athena-spill\\\",\\\"host\\\":\\\"abc12345.snowflakecomputing.com\\\",\\\"port\\\":\\\"1234\\\",\\\"warehouse\\\":\\\"DEV_WH\\\",\\\"database\\\":\\\"TEST\\\",\\\"schema\\\":\\\"PUBLIC\\\",\\\"SecretArn\\\":\\\"arn:aws:secretsmanager:ap-south-1:111122223333:secret:snowflake-XHb67j\\\"}\"
The type of data catalog to create: LAMBDA
for a federated catalog, HIVE
for an external Hive metastore, or GLUE
for a Glue Data Catalog.
The type of data catalog to create: LAMBDA
for a federated catalog, GLUE
for a Glue Data Catalog, and HIVE
for an external Apache Hive metastore. FEDERATED
is a federated catalog for which Athena creates the connection and the Lambda function for you based on the parameters that you pass.
Specifies the Lambda function or functions to use for the data catalog. This is a mapping whose values depend on the catalog type.
For the HIVE
data catalog type, use the following syntax. The metadata-function
parameter is required. The sdk-version
parameter is optional and defaults to the currently supported version.
metadata-function=lambda_arn, sdk-version=version_number
For the LAMBDA
data catalog type, use one of the following sets of required parameters, but not both.
If you have one Lambda function that processes metadata and another for reading the actual data, use the following syntax. Both parameters are required.
metadata-function=lambda_arn, record-function=lambda_arn
If you have a composite Lambda function that processes both metadata and data, use the following syntax to specify your Lambda function.
function=lambda_arn
The GLUE
type requires a catalog ID parameter. The catalog_id
is the account ID of the Amazon Web Services account to which the Glue catalog belongs.
catalog-id=catalog_id
The GLUE
data catalog type also applies to the default AwsDataCatalog
that already exists in your account; you can have only one, and you cannot modify it.
Specifies the Lambda function or functions to use for the data catalog. This is a mapping whose values depend on the catalog type.
For the HIVE
data catalog type, use the following syntax. The metadata-function
parameter is required. The sdk-version
parameter is optional and defaults to the currently supported version.
metadata-function=lambda_arn, sdk-version=version_number
For the LAMBDA
data catalog type, use one of the following sets of required parameters, but not both.
If you have one Lambda function that processes metadata and another for reading the actual data, use the following syntax. Both parameters are required.
metadata-function=lambda_arn, record-function=lambda_arn
If you have a composite Lambda function that processes both metadata and data, use the following syntax to specify your Lambda function.
function=lambda_arn
The GLUE
type requires a catalog ID parameter. The catalog_id
is the account ID of the Amazon Web Services account to which the Glue catalog belongs.
catalog-id=catalog_id
The GLUE
data catalog type also applies to the default AwsDataCatalog
that already exists in your account; you can have only one, and you cannot modify it.
The FEDERATED
data catalog type uses one of the following parameters, but not both. Use connection-arn
for an existing Glue connection. Use connection-type
and connection-properties
to specify the configuration settings for a new connection.
connection-arn:<glue_connection_arn_to_reuse>
connection-type:MYSQL|REDSHIFT|...., connection-properties:\"<json_string>\"
For <json_string>
, use escaped JSON text, as in the following example.
\"{\\\"spill_bucket\\\":\\\"my_spill\\\",\\\"spill_prefix\\\":\\\"athena-spill\\\",\\\"host\\\":\\\"abc12345.snowflakecomputing.com\\\",\\\"port\\\":\\\"1234\\\",\\\"warehouse\\\":\\\"DEV_WH\\\",\\\"database\\\":\\\"TEST\\\",\\\"schema\\\":\\\"PUBLIC\\\",\\\"SecretArn\\\":\\\"arn:aws:secretsmanager:ap-south-1:111122223333:secret:snowflake-XHb67j\\\"}\"
The status of the creation or deletion of the data catalog.
The LAMBDA
, GLUE
, and HIVE
data catalog types are created synchronously. Their status is either CREATE_COMPLETE
or CREATE_FAILED
.
The FEDERATED
data catalog type is created asynchronously.
Data catalog creation status:
CREATE_IN_PROGRESS
: Federated data catalog creation in progress.
CREATE_COMPLETE
: Data catalog creation complete.
CREATE_FAILED
: Data catalog could not be created.
CREATE_FAILED_CLEANUP_IN_PROGRESS
: Federated data catalog creation failed and is being removed.
CREATE_FAILED_CLEANUP_COMPLETE
: Federated data catalog creation failed and was removed.
CREATE_FAILED_CLEANUP_FAILED
: Federated data catalog creation failed but could not be removed.
Data catalog deletion status:
DELETE_IN_PROGRESS
: Federated data catalog deletion in progress.
DELETE_COMPLETE
: Federated data catalog deleted.
DELETE_FAILED
: Federated data catalog could not be deleted.
The type of connection for a FEDERATED
data catalog (for example, REDSHIFT
, MYSQL
, or SQLSERVER
). For information about individual connectors, see Available data source connectors.
Text of the error that occurred during data catalog creation or deletion.
" } }, "documentation":"Contains information about a data catalog in an Amazon Web Services account.
In the Athena console, data catalogs are listed as \"data sources\" on the Data sources page under the Data source name column.
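Because FEDERATED catalogs are created and deleted asynchronously (see the Status values documented above), callers are expected to poll GetDataCatalog until a terminal state is reached. A minimal sketch, assuming the hypothetical catalog name from the previous example:

    import time

    import botocore.session

    athena = botocore.session.get_session().create_client("athena")

    # The two *_IN_PROGRESS states are transient; everything else is terminal.
    terminal = {"CREATE_COMPLETE", "CREATE_FAILED",
                "CREATE_FAILED_CLEANUP_COMPLETE", "CREATE_FAILED_CLEANUP_FAILED"}
    while True:
        catalog = athena.get_data_catalog(Name="my_federated_catalog")["DataCatalog"]
        if catalog.get("Status") in terminal:
            print(catalog.get("Status"), catalog.get("Error"))
            break
        time.sleep(10)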
The data catalog type.
" + }, + "Status":{ + "shape":"DataCatalogStatus", + "documentation":"The status of the creation or deletion of the data catalog.
The LAMBDA
, GLUE
, and HIVE
data catalog types are created synchronously. Their status is either CREATE_COMPLETE
or CREATE_FAILED
.
The FEDERATED
data catalog type is created asynchronously.
Data catalog creation status:
CREATE_IN_PROGRESS
: Federated data catalog creation in progress.
CREATE_COMPLETE
: Data catalog creation complete.
CREATE_FAILED
: Data catalog could not be created.
CREATE_FAILED_CLEANUP_IN_PROGRESS
: Federated data catalog creation failed and is being removed.
CREATE_FAILED_CLEANUP_COMPLETE
: Federated data catalog creation failed and was removed.
CREATE_FAILED_CLEANUP_FAILED
: Federated data catalog creation failed but could not be removed.
Data catalog deletion status:
DELETE_IN_PROGRESS
: Federated data catalog deletion in progress.
DELETE_COMPLETE
: Federated data catalog deleted.
DELETE_FAILED
: Federated data catalog could not be deleted.
The type of connection for a FEDERATED
data catalog (for example, REDSHIFT
, MYSQL
, or SQLSERVER
). For information about individual connectors, see Available data source connectors.
Text of the error that occurred during data catalog creation or deletion.
" } }, "documentation":"The summary information for the data catalog, which includes its name and type.
" @@ -1828,7 +1902,8 @@ "enum":[ "LAMBDA", "GLUE", - "HIVE" + "HIVE", + "FEDERATED" ] }, "Database":{ @@ -1904,6 +1979,7 @@ "DeleteDataCatalogOutput":{ "type":"structure", "members":{ + "DataCatalog":{"shape":"DataCatalog"} } }, "DeleteNamedQueryInput":{ diff --git a/botocore/data/bedrock-agent/2023-06-05/service-2.json b/botocore/data/bedrock-agent/2023-06-05/service-2.json index 253826604a..4a519427e8 100644 --- a/botocore/data/bedrock-agent/2023-06-05/service-2.json +++ b/botocore/data/bedrock-agent/2023-06-05/service-2.json @@ -2313,7 +2313,7 @@ }, "foundationModel":{ "shape":"ModelIdentifier", - "documentation":"The foundation model to be used for orchestration by the agent you create.
" + "documentation":"The Amazon Resource Name (ARN) of the foundation model to be used for orchestration by the agent you create.
" }, "guardrailConfiguration":{ "shape":"GuardrailConfiguration", @@ -5190,8 +5190,8 @@ "documentation":"The unique identifier of the knowledge base to query.
" }, "modelId":{ - "shape":"ModelIdentifier", - "documentation":"The unique identifier of the model to use to generate a response from the query results. Omit this field if you want to return the retrieved results as an array.
" + "shape":"KnowledgeBaseModelIdentifier", + "documentation":"The unique identifier of the model or inference profile to use to generate a response from the query results. Omit this field if you want to return the retrieved results as an array.
" } }, "documentation":"Contains configurations for a knowledge base node in a flow. This node takes a query as the input and returns, as the output, the retrieved responses directly (as an array) or a response generated based on the retrieved responses. For more information, see Node types in Amazon Bedrock works in the Amazon Bedrock User Guide.
" @@ -5202,6 +5202,12 @@ "min":0, "pattern":"^[0-9a-zA-Z]+$" }, + "KnowledgeBaseModelIdentifier":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)|(arn:aws(|-us-gov|-cn|-iso|-iso-b):bedrock:(|[0-9a-z-]{1,20}):(|[0-9]{12}):(model-gateway|inference-profile)/[a-zA-Z0-9-:.]+)|([a-zA-Z0-9-:.]+)$" + }, "KnowledgeBaseRoleArn":{ "type":"string", "max":2048, @@ -6301,7 +6307,7 @@ }, "modelId":{ "shape":"PromptModelIdentifier", - "documentation":"The unique identifier of the model to run inference with.
" + "documentation":"The unique identifier of the model or inference profile to run inference with.
" }, "templateConfiguration":{ "shape":"PromptTemplateConfiguration", @@ -6424,7 +6430,7 @@ "type":"string", "max":2048, "min":1, - "pattern":"^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)$" + "pattern":"^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)|(arn:aws(|-us-gov|-cn|-iso|-iso-b):bedrock:(|[0-9a-z-]{1,20}):(|[0-9]{12}):(model-gateway|inference-profile)/[a-zA-Z0-9-:.]+)|([a-zA-Z0-9-:.]+)$" }, "PromptModelInferenceConfiguration":{ "type":"structure", @@ -6462,7 +6468,7 @@ "members":{ "overrideLambda":{ "shape":"LambdaArn", - "documentation":"The ARN of the Lambda function to use when parsing the raw foundation model output in parts of the agent sequence. If you specify this field, at least one of the promptConfigurations
must contain a parserMode
value that is set to OVERRIDDEN
. For more information, see Parser Lambda function in Agents for Amazon Bedrock.
The ARN of the Lambda function to use when parsing the raw foundation model output in parts of the agent sequence. If you specify this field, at least one of the promptConfigurations
must contain a parserMode
value that is set to OVERRIDDEN
. For more information, see Parser Lambda function in Amazon Bedrock Agents.
The unique identifier of the model with which to run inference on the prompt.
" + "documentation":"The unique identifier of the model or inference profile with which to run inference on the prompt.
" }, "name":{ "shape":"PromptVariantName", diff --git a/botocore/data/ec2/2016-11-15/service-2.json b/botocore/data/ec2/2016-11-15/service-2.json index 32910a6be8..1f84c148d3 100644 --- a/botocore/data/ec2/2016-11-15/service-2.json +++ b/botocore/data/ec2/2016-11-15/service-2.json @@ -459,7 +459,7 @@ "requestUri":"/" }, "input":{"shape":"CancelConversionRequest"}, - "documentation":"Cancels an active conversion task. The task can be the import of an instance or volume. The action removes all artifacts of the conversion, including a partially uploaded volume or instance. If the conversion is complete or is in the process of transferring the final disk image, the command fails and returns an exception.
For more information, see Importing a Virtual Machine Using the Amazon EC2 CLI.
" + "documentation":"Cancels an active conversion task. The task can be the import of an instance or volume. The action removes all artifacts of the conversion, including a partially uploaded volume or instance. If the conversion is complete or is in the process of transferring the final disk image, the command fails and returns an exception.
" }, "CancelExportTask":{ "name":"CancelExportTask", @@ -4827,7 +4827,7 @@ }, "input":{"shape":"ImportInstanceRequest"}, "output":{"shape":"ImportInstanceResult"}, - "documentation":"We recommend that you use the ImportImage
API. For more information, see Importing a VM as an image using VM Import/Export in the VM Import/Export User Guide.
Creates an import instance task using metadata from the specified disk image.
This API action is not supported by the Command Line Interface (CLI). For information about using the Amazon EC2 CLI, which is deprecated, see Importing a VM to Amazon EC2 in the Amazon EC2 CLI Reference PDF file.
This API action supports only single-volume VMs. To import multi-volume VMs, use ImportImage instead.
For information about the import manifest referenced by this API action, see VM Import Manifest.
" + "documentation":"We recommend that you use the ImportImage
API instead. For more information, see Importing a VM as an image using VM Import/Export in the VM Import/Export User Guide.
Creates an import instance task using metadata from the specified disk image.
This API action supports only single-volume VMs. To import multi-volume VMs, use ImportImage instead.
For information about the import manifest referenced by this API action, see VM Import Manifest.
This API action is not supported by the Command Line Interface (CLI).
" }, "ImportKeyPair":{ "name":"ImportKeyPair", @@ -4857,7 +4857,7 @@ }, "input":{"shape":"ImportVolumeRequest"}, "output":{"shape":"ImportVolumeResult"}, - "documentation":"Creates an import volume task using metadata from the specified disk image.
This API action supports only single-volume VMs. To import multi-volume VMs, use ImportImage instead. To import a disk to a snapshot, use ImportSnapshot instead.
This API action is not supported by the Command Line Interface (CLI). For information about using the Amazon EC2 CLI, which is deprecated, see Importing Disks to Amazon EBS in the Amazon EC2 CLI Reference PDF file.
For information about the import manifest referenced by this API action, see VM Import Manifest.
" + "documentation":"This API action supports only single-volume VMs. To import multi-volume VMs, use ImportImage instead. To import a disk to a snapshot, use ImportSnapshot instead.
Creates an import volume task using metadata from the specified disk image.
For information about the import manifest referenced by this API action, see VM Import Manifest.
This API action is not supported by the Command Line Interface (CLI).
" }, "ListImagesInRecycleBin":{ "name":"ListImagesInRecycleBin", @@ -17321,7 +17321,7 @@ "type":"structure", "members":{ "KeyName":{ - "shape":"KeyPairName", + "shape":"KeyPairNameWithResolver", "documentation":"The name of the key pair.
" }, "KeyPairId":{ @@ -19306,11 +19306,7 @@ }, "DescribeCapacityBlockOfferingsRequest":{ "type":"structure", - "required":[ - "InstanceType", - "InstanceCount", - "CapacityDurationHours" - ], + "required":["CapacityDurationHours"], "members":{ "DryRun":{ "shape":"Boolean", @@ -29887,7 +29883,11 @@ }, "FleetCapacityReservationUsageStrategy":{ "type":"string", - "enum":["use-capacity-reservations-first"] + "enum":[ + "use-capacity-reservations-first", + "use-capacity-reservations-only", + "none" + ] }, "FleetData":{ "type":"structure", @@ -37483,7 +37483,15 @@ "r8g.48xlarge", "r8g.metal-24xl", "r8g.metal-48xl", - "mac2-m1ultra.metal" + "mac2-m1ultra.metal", + "g6e.xlarge", + "g6e.2xlarge", + "g6e.4xlarge", + "g6e.8xlarge", + "g6e.12xlarge", + "g6e.16xlarge", + "g6e.24xlarge", + "g6e.48xlarge" ] }, "InstanceTypeHypervisor":{ @@ -39755,6 +39763,7 @@ } }, "KeyPairName":{"type":"string"}, + "KeyPairNameWithResolver":{"type":"string"}, "KeyType":{ "type":"string", "enum":[ @@ -54262,7 +54271,7 @@ "members":{ "Description":{ "shape":"String", - "documentation":"The description of the snapshot.
", + "documentation":"The description of the disk image being imported.
", "locationName":"description" }, "DiskImageSize":{ diff --git a/botocore/data/emr-serverless/2021-07-13/service-2.json b/botocore/data/emr-serverless/2021-07-13/service-2.json index d5e5c73744..e55f2a9295 100644 --- a/botocore/data/emr-serverless/2021-07-13/service-2.json +++ b/botocore/data/emr-serverless/2021-07-13/service-2.json @@ -2,9 +2,10 @@ "version":"2.0", "metadata":{ "apiVersion":"2021-07-13", + "auth":["aws.auth#sigv4"], "endpointPrefix":"emr-serverless", - "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"EMR Serverless", "serviceId":"EMR Serverless", "signatureVersion":"v4", @@ -370,6 +371,10 @@ "interactiveConfiguration":{ "shape":"InteractiveConfiguration", "documentation":"The interactive configuration object that enables the interactive use cases for an application.
" + }, + "schedulerConfiguration":{ + "shape":"SchedulerConfiguration", + "documentation":"The scheduler configuration for batch and streaming jobs running on this application. Supported with release labels emr-7.0.0 and above.
" } }, "documentation":"Information about an application. Amazon EMR Serverless uses applications to run jobs.
" @@ -728,6 +733,10 @@ "interactiveConfiguration":{ "shape":"InteractiveConfiguration", "documentation":"The interactive configuration object that enables the interactive use cases to use when running an application.
" + }, + "schedulerConfiguration":{ + "shape":"SchedulerConfiguration", + "documentation":"The scheduler configuration for batch and streaming jobs running on this application. Supported with release labels emr-7.0.0 and above.
" } } }, @@ -793,7 +802,7 @@ "type":"string", "max":2048, "min":20, - "pattern":"arn:(aws[a-zA-Z0-9-]*):kms:[a-zA-Z0-9\\-]*:(\\d{12})?:key\\/[a-zA-Z0-9-]+" + "pattern":"arn:(aws[a-zA-Z0-9-]*):kms:[a-zA-Z0-9\\-]*:([0-9]{12}):key\\/[a-zA-Z0-9-]+" }, "EngineType":{ "type":"string", @@ -943,7 +952,7 @@ "type":"string", "max":2048, "min":20, - "pattern":"arn:(aws[a-zA-Z0-9-]*):iam::(\\d{12})?:(role((\\u002F)|(\\u002F[\\u0021-\\u007F]+\\u002F))[\\w+=,.@-]+)" + "pattern":"arn:(aws[a-zA-Z0-9-]*):iam::([0-9]{12}):(role((\\u002F)|(\\u002F[\\u0021-\\u007F]+\\u002F))[\\w+=,.@-]+)" }, "ImageConfiguration":{ "type":"structure", @@ -1168,6 +1177,18 @@ "attemptUpdatedAt":{ "shape":"Date", "documentation":"The date and time of when the job run attempt was last updated.
" + }, + "startedAt":{ + "shape":"Date", + "documentation":"The date and time when the job moved to the RUNNING state.
" + }, + "endedAt":{ + "shape":"Date", + "documentation":"The date and time when the job was terminated.
" + }, + "queuedDurationMilliseconds":{ + "shape":"Long", + "documentation":"The total time for a job in the QUEUED state in milliseconds.
" } }, "documentation":"Information about a job run. A job run is a unit of work, such as a Spark JAR, Hive query, or SparkSQL query, that you submit to an Amazon EMR Serverless application.
" @@ -1278,7 +1299,8 @@ "SUCCESS", "FAILED", "CANCELLING", - "CANCELLED" + "CANCELLED", + "QUEUED" ] }, "JobRunStateSet":{ @@ -1591,6 +1613,10 @@ "min":1, "pattern":"[a-zA-Z]+[-_]*[a-zA-Z]+" }, + "Long":{ + "type":"long", + "box":true + }, "ManagedPersistenceMonitoringConfiguration":{ "type":"structure", "members":{ @@ -1780,6 +1806,20 @@ }, "documentation":"The Amazon S3 configuration for monitoring log publishing. You can configure your jobs to send log information to Amazon S3.
" }, + "SchedulerConfiguration":{ + "type":"structure", + "members":{ + "queueTimeoutMinutes":{ + "shape":"Integer", + "documentation":"The maximum duration in minutes for the job in QUEUED state. If scheduler configuration is enabled on your application, the default value is 360 minutes (6 hours). The valid range is from 15 to 720.
" + }, + "maxConcurrentRuns":{ + "shape":"Integer", + "documentation":"The maximum concurrent job runs on this application. If scheduler configuration is enabled on your application, the default value is 15. The valid range is 1 to 1000.
" + } + }, + "documentation":"The scheduler configuration for batch and streaming jobs running on this application. Supported with release labels emr-7.0.0 and above.
" + }, "SecurityGroupIds":{ "type":"list", "member":{"shape":"SecurityGroupString"}, @@ -2128,6 +2168,10 @@ "monitoringConfiguration":{ "shape":"MonitoringConfiguration", "documentation":"The configuration setting for monitoring.
" + }, + "schedulerConfiguration":{ + "shape":"SchedulerConfiguration", + "documentation":"The scheduler configuration for batch and streaming jobs running on this application. Supported with release labels emr-7.0.0 and above.
" } } }, diff --git a/botocore/data/glue/2017-03-31/service-2.json b/botocore/data/glue/2017-03-31/service-2.json index 4031dda562..6e4716c11d 100644 --- a/botocore/data/glue/2017-03-31/service-2.json +++ b/botocore/data/glue/2017-03-31/service-2.json @@ -4230,13 +4230,13 @@ "shape":"AuthenticationType", "documentation":"A structure containing the authentication configuration in the CreateConnection request.
" }, - "SecretArn":{ - "shape":"SecretArn", - "documentation":"The secret manager ARN to store credentials in the CreateConnection request.
" - }, "OAuth2Properties":{ "shape":"OAuth2PropertiesInput", "documentation":"The properties for OAuth2 authentication in the CreateConnection request.
" + }, + "SecretArn":{ + "shape":"SecretArn", + "documentation":"The secret manager ARN to store credentials in the CreateConnection request.
" } }, "documentation":"A structure containing the authentication configuration in the CreateConnection request.
" @@ -4253,7 +4253,8 @@ "type":"string", "max":4096, "min":1, - "pattern":"\\S+" + "pattern":"\\S+", + "sensitive":true }, "AuthorizationCodeProperties":{ "type":"structure", @@ -6684,6 +6685,10 @@ "shape":"ConnectionProperties", "documentation":"These key-value pairs define parameters for the connection:
HOST
- The host URI: either the fully qualified domain name (FQDN) or the IPv4 address of the database host.
PORT
- The port number, between 1024 and 65535, of the port on which the database host is listening for database connections.
USER_NAME
- The name under which to log in to the database. The value string for USER_NAME
is \"USERNAME
\".
PASSWORD
- A password, if one is used, for the user name.
ENCRYPTED_PASSWORD
- When you enable connection password protection by setting ConnectionPasswordEncryption
in the Data Catalog encryption settings, this field stores the encrypted password.
JDBC_DRIVER_JAR_URI
- The Amazon Simple Storage Service (Amazon S3) path of the JAR file that contains the JDBC driver to use.
JDBC_DRIVER_CLASS_NAME
- The class name of the JDBC driver to use.
JDBC_ENGINE
- The name of the JDBC engine to use.
JDBC_ENGINE_VERSION
- The version of the JDBC engine to use.
CONFIG_FILES
- (Reserved for future use.)
INSTANCE_ID
- The instance ID to use.
JDBC_CONNECTION_URL
- The URL for connecting to a JDBC data source.
JDBC_ENFORCE_SSL
- A Boolean string (true, false) specifying whether Secure Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection on the client. The default is false.
CUSTOM_JDBC_CERT
- An Amazon S3 location specifying the customer's root certificate. Glue uses this root certificate to validate the customer’s certificate when connecting to the customer database. Glue only handles X.509 certificates. The certificate provided must be DER-encoded and supplied in base64-encoded PEM format.
SKIP_CUSTOM_JDBC_CERT_VALIDATION
- By default, this is false
. Glue validates the Signature algorithm and Subject Public Key Algorithm for the customer certificate. The only permitted algorithms for the Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the Subject Public Key Algorithm, the key length must be at least 2048. You can set the value of this property to true
to skip Glue’s validation of the customer certificate.
CUSTOM_JDBC_CERT_STRING
- A custom JDBC certificate string which is used for domain match or distinguished name match to prevent a man-in-the-middle attack. In Oracle database, this is used as the SSL_SERVER_CERT_DN
; in Microsoft SQL Server, this is used as the hostNameInCertificate
.
CONNECTION_URL
- The URL for connecting to a general (non-JDBC) data source.
SECRET_ID
- The secret ID used to retrieve credentials from Secrets Manager.
CONNECTOR_URL
- The connector URL for a MARKETPLACE or CUSTOM connection.
CONNECTOR_TYPE
- The connector type for a MARKETPLACE or CUSTOM connection.
CONNECTOR_CLASS_NAME
- The connector class name for a MARKETPLACE or CUSTOM connection.
KAFKA_BOOTSTRAP_SERVERS
- A comma-separated list of host and port pairs that are the addresses of the Apache Kafka brokers in a Kafka cluster to which a Kafka client will connect and bootstrap itself.
KAFKA_SSL_ENABLED
- Whether to enable or disable SSL on an Apache Kafka connection. Default value is \"true\".
KAFKA_CUSTOM_CERT
- The Amazon S3 URL for the private CA cert file (.pem format). The default is an empty string.
KAFKA_SKIP_CUSTOM_CERT_VALIDATION
- Whether to skip the validation of the CA cert file or not. Glue validates for three algorithms: SHA256withRSA, SHA384withRSA and SHA512withRSA. Default value is \"false\".
KAFKA_CLIENT_KEYSTORE
- The Amazon S3 location of the client keystore file for Kafka client side authentication (Optional).
KAFKA_CLIENT_KEYSTORE_PASSWORD
- The password to access the provided keystore (Optional).
KAFKA_CLIENT_KEY_PASSWORD
- A keystore can consist of multiple keys, so this is the password to access the client key to be used with the Kafka server side key (Optional).
ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD
- The encrypted version of the Kafka client keystore password (if the user has the Glue encrypt passwords setting selected).
ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD
- The encrypted version of the Kafka client key password (if the user has the Glue encrypt passwords setting selected).
KAFKA_SASL_MECHANISM
- \"SCRAM-SHA-512\"
, \"GSSAPI\"
, \"AWS_MSK_IAM\"
, or \"PLAIN\"
. These are the supported SASL Mechanisms.
KAFKA_SASL_PLAIN_USERNAME
- A plaintext username used to authenticate with the \"PLAIN\" mechanism.
KAFKA_SASL_PLAIN_PASSWORD
- A plaintext password used to authenticate with the \"PLAIN\" mechanism.
ENCRYPTED_KAFKA_SASL_PLAIN_PASSWORD
- The encrypted version of the Kafka SASL PLAIN password (if the user has the Glue encrypt passwords setting selected).
KAFKA_SASL_SCRAM_USERNAME
- A plaintext username used to authenticate with the \"SCRAM-SHA-512\" mechanism.
KAFKA_SASL_SCRAM_PASSWORD
- A plaintext password used to authenticate with the \"SCRAM-SHA-512\" mechanism.
ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD
- The encrypted version of the Kafka SASL SCRAM password (if the user has the Glue encrypt passwords setting selected).
KAFKA_SASL_SCRAM_SECRETS_ARN
- The Amazon Resource Name of a secret in Amazon Web Services Secrets Manager.
KAFKA_SASL_GSSAPI_KEYTAB
- The S3 location of a Kerberos keytab
file. A keytab stores long-term keys for one or more principals. For more information, see MIT Kerberos Documentation: Keytab.
KAFKA_SASL_GSSAPI_KRB5_CONF
- The S3 location of a Kerberos krb5.conf
file. A krb5.conf stores Kerberos configuration information, such as the location of the KDC server. For more information, see MIT Kerberos Documentation: krb5.conf.
KAFKA_SASL_GSSAPI_SERVICE
- The Kerberos service name, as set with sasl.kerberos.service.name
in your Kafka Configuration.
KAFKA_SASL_GSSAPI_PRINCIPAL
- The name of the Kerberos principal used by Glue. For more information, see Kafka Documentation: Configuring Kafka Brokers.
ROLE_ARN
- The role to be used for running queries.
REGION
- The Amazon Web Services Region where queries will be run.
WORKGROUP_NAME
- The name of an Amazon Redshift serverless workgroup or Amazon Athena workgroup in which queries will run.
CLUSTER_IDENTIFIER
- The cluster identifier of an Amazon Redshift cluster in which queries will run.
DATABASE
- The Amazon Redshift database that you are connecting to.
This field is not currently used.
" + }, "PhysicalConnectionRequirements":{ "shape":"PhysicalConnectionRequirements", "documentation":"The physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup
, that are needed to make this connection successfully.
These key-value pairs define parameters for the connection.
" }, + "AthenaProperties":{ + "shape":"PropertyMap", + "documentation":"This field is not currently used.
" + }, "PhysicalConnectionRequirements":{ "shape":"PhysicalConnectionRequirements", "documentation":"The physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup
, that are needed to successfully make this connection.
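For context on how these connection fields fit together, a hedged CreateConnection sketch follows; all values are placeholders, and the new AthenaProperties map is omitted because the service documents it as not currently used:

    import botocore.session

    glue = botocore.session.get_session().create_client("glue")

    glue.create_connection(
        ConnectionInput={
            "Name": "my-jdbc-connection",  # placeholder
            "ConnectionType": "JDBC",
            "ConnectionProperties": {
                "JDBC_CONNECTION_URL": "jdbc:mysql://db.example.com:3306/test",
                "JDBC_ENFORCE_SSL": "true",
                "SECRET_ID": "my-db-secret",  # credentials resolved through Secrets Manager
            },
            "PhysicalConnectionRequirements": {
                "SubnetId": "subnet-0123456789abcdef0",
                "SecurityGroupIdList": ["sg-0123456789abcdef0"],
            },
        },
    )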
Specifies the job and session values that an admin configures in a Glue usage profile.
" }, + "PropertyKey":{ + "type":"string", + "max":128, + "min":1 + }, + "PropertyMap":{ + "type":"map", + "key":{"shape":"PropertyKey"}, + "value":{"shape":"PropertyValue"} + }, "PropertyPredicate":{ "type":"structure", "members":{ @@ -17978,6 +17997,11 @@ }, "documentation":"Defines a property predicate.
" }, + "PropertyValue":{ + "type":"string", + "max":2048, + "min":1 + }, "PublicKeysList":{ "type":"list", "member":{"shape":"GenericString"}, diff --git a/botocore/data/rds/2014-10-31/service-2.json b/botocore/data/rds/2014-10-31/service-2.json index 98b6238342..5c727bc9d4 100644 --- a/botocore/data/rds/2014-10-31/service-2.json +++ b/botocore/data/rds/2014-10-31/service-2.json @@ -553,7 +553,6 @@ {"shape":"DBClusterNotFoundFault"}, {"shape":"MaxDBShardGroupLimitReached"}, {"shape":"InvalidDBClusterStateFault"}, - {"shape":"InvalidMaxAcuFault"}, {"shape":"UnsupportedDBEngineVersionFault"}, {"shape":"InvalidVPCNetworkStateFault"} ], @@ -2166,8 +2165,7 @@ "errors":[ {"shape":"InvalidDBClusterStateFault"}, {"shape":"DBShardGroupAlreadyExistsFault"}, - {"shape":"DBShardGroupNotFoundFault"}, - {"shape":"InvalidMaxAcuFault"} + {"shape":"DBShardGroupNotFoundFault"} ], "documentation":"Modifies the settings of an Aurora Limitless Database DB shard group. You can change one or more settings by specifying these parameters and the new values in the request.
" }, @@ -2703,7 +2701,7 @@ {"shape":"CertificateNotFoundFault"}, {"shape":"TenantDatabaseQuotaExceededFault"} ], - "documentation":"Creates a new DB instance from a DB snapshot. The target database is created from the source database restore point with most of the source's original configuration, including the default security group and DB parameter group. By default, the new DB instance is created as a Single-AZ deployment, except when the instance is a SQL Server instance that has an option group associated with mirroring. In this case, the instance becomes a Multi-AZ deployment, not a Single-AZ deployment.
If you want to replace your original DB instance with the new, restored DB instance, then rename your original DB instance before you call the RestoreDBInstanceFromDBSnapshot
operation. RDS doesn't allow two DB instances with the same name. After you have renamed your original DB instance with a different identifier, then you can pass the original name of the DB instance as the DBInstanceIdentifier
in the call to the RestoreDBInstanceFromDBSnapshot
operation. The result is that you replace the original DB instance with the DB instance created from the snapshot.
If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier
must be the ARN of the shared DB snapshot.
To restore from a DB snapshot with an unsupported engine version, you must first upgrade the engine version of the snapshot. For more information about upgrading an RDS for MySQL DB snapshot engine version, see Upgrading a MySQL DB snapshot engine version. For more information about upgrading an RDS for PostgreSQL DB snapshot engine version, see Upgrading a PostgreSQL DB snapshot engine version.
This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora, use RestoreDBClusterFromSnapshot
.
Creates a new DB instance from a DB snapshot. The target database is created from the source database restore point with most of the source's original configuration, including the default security group and DB parameter group. By default, the new DB instance is created as a Single-AZ deployment, except when the instance is a SQL Server instance that has an option group associated with mirroring. In this case, the instance becomes a Multi-AZ deployment, not a Single-AZ deployment.
If you want to replace your original DB instance with the new, restored DB instance, then rename your original DB instance before you call the RestoreDBInstanceFromDBSnapshot
operation. RDS doesn't allow two DB instances with the same name. After you have renamed your original DB instance with a different identifier, then you can pass the original name of the DB instance as the DBInstanceIdentifier
in the call to the RestoreDBInstanceFromDBSnapshot
operation. The result is that you replace the original DB instance with the DB instance created from the snapshot.
If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier
must be the ARN of the shared DB snapshot.
To restore from a DB snapshot with an unsupported engine version, you must first upgrade the engine version of the snapshot. For more information about upgrading an RDS for MySQL DB snapshot engine version, see Upgrading a MySQL DB snapshot engine version. For more information about upgrading an RDS for PostgreSQL DB snapshot engine version, see Upgrading a PostgreSQL DB snapshot engine version.
This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora, use RestoreDBClusterFromSnapshot
.
The name of the DB parameter group to associate with this DB instance.
If you don't specify a value for DBParameterGroupName
, then Amazon RDS uses the DBParameterGroup
of the source DB instance for a same Region read replica, or the default DBParameterGroup
for the specified DB engine for a cross-Region read replica.
Specifying a parameter group for this operation is only supported for MySQL DB instances for cross-Region read replicas and for Oracle DB instances. It isn't supported for MySQL DB instances for same Region read replicas or for RDS Custom.
Constraints:
Must be 1 to 255 letters, numbers, or hyphens.
First character must be a letter.
Can't end with a hyphen or contain two consecutive hyphens.
The name of the DB parameter group to associate with this read replica DB instance.
For Single-AZ or Multi-AZ DB instance read replica instances, if you don't specify a value for DBParameterGroupName
, then Amazon RDS uses the DBParameterGroup
of the source DB instance for a same Region read replica, or the default DBParameterGroup
for the specified DB engine for a cross-Region read replica.
For Multi-AZ DB cluster same Region read replica instances, if you don't specify a value for DBParameterGroupName
, then Amazon RDS uses the default DBParameterGroup
.
Specifying a parameter group for this operation is only supported for MySQL DB instances for cross-Region read replicas, for Multi-AZ DB cluster read replica instances, and for Oracle DB instances. It isn't supported for MySQL DB instances for same Region read replicas or for RDS Custom.
Constraints:
Must be 1 to 255 letters, numbers, or hyphens.
First character must be a letter.
Can't end with a hyphen or contain two consecutive hyphens.
Specifies whether to create standby instances for the DB shard group. Valid values are the following:
0 - Creates a single, primary DB instance for each physical shard. This is the default value, and the only one supported for the preview.
1 - Creates a primary DB instance and a standby instance in a different Availability Zone (AZ) for each physical shard.
2 - Creates a primary DB instance and two standby instances in different AZs for each physical shard.
Specifies whether to create standby DB shard groups for the DB shard group. Valid values are the following:
0 - Creates a DB shard group without a standby DB shard group. This is the default value.
1 - Creates a DB shard group with a standby DB shard group in a different Availability Zone (AZ).
2 - Creates a DB shard group with two standby DB shard groups in two different AZs.
Specifies whether to create standby instances for the DB shard group. Valid values are the following:
0 - Creates a single, primary DB instance for each physical shard. This is the default value, and the only one supported for the preview.
1 - Creates a primary DB instance and a standby instance in a different Availability Zone (AZ) for each physical shard.
2 - Creates a primary DB instance and two standby instances in different AZs for each physical shard.
Specifies whether to create standby DB shard groups for the DB shard group. Valid values are the following:
0 - Creates a DB shard group without a standby DB shard group. This is the default value.
1 - Creates a DB shard group with a standby DB shard group in a different Availability Zone (AZ).
2 - Creates a DB shard group with two standby DB shard groups in two different AZs.
The connection endpoint for the DB shard group.
" + }, + "DBShardGroupArn":{ + "shape":"String", + "documentation":"The Amazon Resource Name (ARN) for the DB shard group.
" } } }, @@ -11783,18 +11785,6 @@ }, "exception":true }, - "InvalidMaxAcuFault":{ - "type":"structure", - "members":{ - }, - "documentation":"The maximum capacity of the DB shard group must be 48-7168 Aurora capacity units (ACUs).
", - "error":{ - "code":"InvalidMaxAcu", - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, "InvalidOptionGroupStateFault":{ "type":"structure", "members":{ @@ -12858,6 +12848,10 @@ "MinACU":{ "shape":"DoubleOptional", "documentation":"The minimum capacity of the DB shard group in Aurora capacity units (ACUs).
" + }, + "ComputeRedundancy":{ + "shape":"IntegerOptional", + "documentation":"Specifies whether to create standby DB shard groups for the DB shard group. Valid values are the following:
0 - Creates a DB shard group without a standby DB shard group. This is the default value.
1 - Creates a DB shard group with a standby DB shard group in a different Availability Zone (AZ).
2 - Creates a DB shard group with two standby DB shard groups in two different AZs.
Retrieves the status of your account's Amazon Web Services service access, and validates the service linked role required to access the multi-account search feature. Only the management account or a delegated administrator with service access enabled can invoke this API call.
" + "documentation":"Retrieves the status of your account's Amazon Web Services service access, and validates the service linked role required to access the multi-account search feature. Only the management account can invoke this API call.
" }, "GetDefaultView":{ "name":"GetDefaultView", @@ -247,6 +249,25 @@ ], "documentation":"Retrieves a list of a member's indexes in all Amazon Web Services Regions that are currently collecting resource information for Amazon Web Services Resource Explorer. Only the management account or a delegated administrator with service access enabled can invoke this API call.
" }, + "ListResources":{ + "name":"ListResources", + "http":{ + "method":"POST", + "requestUri":"/ListResources", + "responseCode":200 + }, + "input":{"shape":"ListResourcesInput"}, + "output":{"shape":"ListResourcesOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"Returns a list of resources and their details that match the specified criteria. This query must use a view. If you don’t explicitly specify a view, then Resource Explorer uses the default view for the Amazon Web Services Region in which you call this operation.
" + }, "ListSupportedResourceTypes":{ "name":"ListSupportedResourceTypes", "http":{ @@ -903,6 +924,67 @@ } } }, + "ListResourcesInput":{ + "type":"structure", + "members":{ + "Filters":{"shape":"SearchFilter"}, + "MaxResults":{ + "shape":"ListResourcesInputMaxResultsInteger", + "documentation":"The maximum number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value appropriate to the operation. If additional items exist beyond those included in the current response, the NextToken
response element is present and has a value (is not null). Include that value as the NextToken
request parameter in the next call to the operation to get the next part of the results.
An API operation can return fewer results than the maximum even when there are more results available. You should check NextToken
after every operation to ensure that you receive all of the results.
The parameter for receiving additional results if you receive a NextToken
response in a previous request. A NextToken
response indicates that more output is available. Set this parameter to the value of the previous call's NextToken
response to indicate where the output should continue from. The pagination tokens expire after 24 hours.
Specifies the Amazon resource name (ARN) of the view to use for the query. If you don't specify a value for this parameter, then the operation automatically uses the default view for the Amazon Web Services Region in which you called this operation. If the Region either doesn't have a default view or if you don't have permission to use the default view, then the operation fails with a 401 Unauthorized exception.
" + } + } + }, + "ListResourcesInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "ListResourcesInputNextTokenString":{ + "type":"string", + "max":2048, + "min":1 + }, + "ListResourcesInputViewArnString":{ + "type":"string", + "max":1000, + "min":0 + }, + "ListResourcesOutput":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"ListResourcesOutputNextTokenString", + "documentation":"If present, indicates that more output is available than is included in the current response. Use this value in the NextToken
request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken
response element comes back as null
. The pagination tokens expire after 24 hours.
The list of structures that describe the resources that match the query.
" + }, + "ViewArn":{ + "shape":"ListResourcesOutputViewArnString", + "documentation":"The Amazon resource name (ARN) of the view that this operation used to perform the search.
" + } + } + }, + "ListResourcesOutputNextTokenString":{ + "type":"string", + "max":2048, + "min":1 + }, + "ListResourcesOutputViewArnString":{ + "type":"string", + "max":1011, + "min":1 + }, "ListSupportedResourceTypesInput":{ "type":"structure", "members":{ @@ -1035,7 +1117,7 @@ }, "QueryString":{ "type":"string", - "max":1011, + "max":1280, "min":0, "sensitive":true }, @@ -1072,7 +1154,7 @@ }, "Service":{ "shape":"String", - "documentation":"The Amazon Web Service that owns the resource and is responsible for creating and updating it.
" + "documentation":"The Amazon Web Servicesservice that owns the resource and is responsible for creating and updating it.
" } }, "documentation":"A resource in Amazon Web Services that Amazon Web Services Resource Explorer has discovered, and for which it has stored information in the index of the Amazon Web Services Region that contains the resource.
" @@ -1259,7 +1341,7 @@ }, "Service":{ "shape":"String", - "documentation":"The Amazon Web Service that is associated with the resource type. This is the primary service that lets you create and interact with resources of this type.
" + "documentation":"The Amazon Web Servicesservice that is associated with the resource type. This is the primary service that lets you create and interact with resources of this type.
" } }, "documentation":"A structure that describes a resource type supported by Amazon Web Services Resource Explorer.
" diff --git a/docs/source/conf.py b/docs/source/conf.py index e1a6f9bc6d..0e1c72cf70 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -59,7 +59,7 @@ # The short X.Y version. version = '1.35.' # The full version, including alpha/beta/rc tags. -release = '1.35.24' +release = '1.35.25' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages.