diff --git a/.changes/1.31.0.json b/.changes/1.31.0.json new file mode 100644 index 0000000000..08e708884d --- /dev/null +++ b/.changes/1.31.0.json @@ -0,0 +1,42 @@ +[ + { + "category": "``ec2``", + "description": "Add Nitro Enclaves support on DescribeInstanceTypes", + "type": "api-change" + }, + { + "category": "``location``", + "description": "This release adds support for authenticating with Amazon Location Service's Places & Routes APIs with an API Key. Also, with this release developers can publish tracked device position updates to Amazon EventBridge.", + "type": "api-change" + }, + { + "category": "``outposts``", + "description": "Added paginator support to several APIs. Added the ISOLATED enum value to AssetState.", + "type": "api-change" + }, + { + "category": "``quicksight``", + "description": "This release includes below three changes: small multiples axes improvement, field based coloring, removed required trait from Aggregation function for TopBottomFilter.", + "type": "api-change" + }, + { + "category": "``rds``", + "description": "Updates Amazon RDS documentation for creating DB instances and creating Aurora global clusters.", + "type": "api-change" + }, + { + "category": "configprovider", + "description": "Fix bug when deep copying config value store where overrides were not preserved", + "type": "bugfix" + }, + { + "category": "configprovider", + "description": "Always use shallow copy of session config value store for clients", + "type": "enhancement" + }, + { + "category": "configuration", + "description": "Configure the endpoint URL in the shared configuration file or via an environment variable for a specific AWS service or all AWS services.", + "type": "feature" + } +] \ No newline at end of file diff --git a/CHANGELOG.rst b/CHANGELOG.rst index efe51b572d..0e980c471d 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,19 @@ CHANGELOG ========= +1.31.0 +====== + +* api-change:``ec2``: Add Nitro Enclaves support on DescribeInstanceTypes +* api-change:``location``: This release adds support for authenticating with Amazon Location Service's Places & Routes APIs with an API Key. Also, with this release developers can publish tracked device position updates to Amazon EventBridge. +* api-change:``outposts``: Added paginator support to several APIs. Added the ISOLATED enum value to AssetState. +* api-change:``quicksight``: This release includes below three changes: small multiples axes improvement, field based coloring, removed required trait from Aggregation function for TopBottomFilter. +* api-change:``rds``: Updates Amazon RDS documentation for creating DB instances and creating Aurora global clusters. +* bugfix:configprovider: Fix bug when deep copying config value store where overrides were not preserved +* enhancement:configprovider: Always use shallow copy of session config value store for clients +* feature:configuration: Configure the endpoint URL in the shared configuration file or via an environment variable for a specific AWS service or all AWS services. 
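The endpoint-configuration feature listed above can be exercised with a short, hypothetical sketch; the DynamoDB service choice, the localhost URL, and the dummy credentials are illustrative and not part of this changeset.

import os
import botocore.session

# Service-specific override added in this release; AWS_ENDPOINT_URL is the
# global fallback for all services.
os.environ['AWS_ENDPOINT_URL_DYNAMODB'] = 'http://localhost:8000'

session = botocore.session.Session()
client = session.create_client(
    'dynamodb',
    region_name='us-east-1',
    aws_access_key_id='dummy',      # placeholder credentials for the sketch
    aws_secret_access_key='dummy',
)
print(client.meta.endpoint_url)     # expected: http://localhost:8000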
+ + 1.30.1 ====== diff --git a/botocore/__init__.py b/botocore/__init__.py index aa346e87f3..a8a3ed4468 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.30.1' +__version__ = '1.31.0' class NullHandler(logging.Handler): diff --git a/botocore/args.py b/botocore/args.py index 7f4766bf76..73c8ab45e0 100644 --- a/botocore/args.py +++ b/botocore/args.py @@ -115,7 +115,7 @@ def get_client_args( s3_config = final_args['s3_config'] partition = endpoint_config['metadata'].get('partition', None) socket_options = final_args['socket_options'] - + configured_endpoint_url = final_args['configured_endpoint_url'] signing_region = endpoint_config['signing_region'] endpoint_region_name = endpoint_config['region_name'] @@ -160,7 +160,7 @@ def get_client_args( service_model, endpoint_region_name, region_name, - endpoint_url, + configured_endpoint_url, endpoint, is_secure, endpoint_bridge, @@ -210,10 +210,16 @@ def compute_client_args( parameter_validation = ensure_boolean(raw_value) s3_config = self.compute_s3_config(client_config) + + configured_endpoint_url = self._compute_configured_endpoint_url( + client_config=client_config, + endpoint_url=endpoint_url, + ) + endpoint_config = self._compute_endpoint_config( service_name=service_name, region_name=region_name, - endpoint_url=endpoint_url, + endpoint_url=configured_endpoint_url, is_secure=is_secure, endpoint_bridge=endpoint_bridge, s3_config=s3_config, @@ -270,6 +276,7 @@ def compute_client_args( return { 'service_name': service_name, 'parameter_validation': parameter_validation, + 'configured_endpoint_url': configured_endpoint_url, 'endpoint_config': endpoint_config, 'protocol': protocol, 'config_kwargs': config_kwargs, @@ -279,6 +286,27 @@ def compute_client_args( ), } + def _compute_configured_endpoint_url(self, client_config, endpoint_url): + if endpoint_url is not None: + return endpoint_url + + if self._ignore_configured_endpoint_urls(client_config): + logger.debug("Ignoring configured endpoint URLs.") + return endpoint_url + + return self._config_store.get_config_variable('endpoint_url') + + def _ignore_configured_endpoint_urls(self, client_config): + if ( + client_config + and client_config.ignore_configured_endpoint_urls is not None + ): + return client_config.ignore_configured_endpoint_urls + + return self._config_store.get_config_variable( + 'ignore_configured_endpoint_urls' + ) + def compute_s3_config(self, client_config): s3_configuration = self._config_store.get_config_variable('s3') diff --git a/botocore/config.py b/botocore/config.py index 6ce25f8b60..be3a475fa7 100644 --- a/botocore/config.py +++ b/botocore/config.py @@ -194,6 +194,13 @@ class Config: Defaults to None. + :type ignore_configured_endpoint_urls: bool + :param ignore_configured_endpoint_urls: Setting to True disables use + of endpoint URLs provided via environment variables and + the shared configuration file. + + Defaults to None. + :type tcp_keepalive: bool :param tcp_keepalive: Enables the TCP Keep-Alive socket option used when creating new connections if set to True. 
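A hedged sketch of the resolution order implemented by _compute_configured_endpoint_url above; the endpoint values and dummy credentials are illustrative rather than taken from this changeset.

import os
import botocore.session
from botocore.config import Config

os.environ['AWS_ENDPOINT_URL'] = 'http://localhost:4566'  # global configured endpoint
session = botocore.session.Session()
common = dict(
    region_name='us-east-1',
    aws_access_key_id='dummy',
    aws_secret_access_key='dummy',
)

# 1) An endpoint_url passed explicitly to create_client always wins.
explicit = session.create_client('s3', endpoint_url='http://localhost:9000', **common)
print(explicit.meta.endpoint_url)    # http://localhost:9000

# 2) Otherwise the configured endpoint (environment variable or shared config file) is used.
configured = session.create_client('s3', **common)
print(configured.meta.endpoint_url)  # http://localhost:4566

# 3) The new ignore_configured_endpoint_urls option opts back out of (2).
ignored = session.create_client(
    's3', config=Config(ignore_configured_endpoint_urls=True), **common
)
print(ignored.meta.endpoint_url)     # default AWS S3 endpoint for the region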
@@ -221,6 +228,7 @@ class Config: ('endpoint_discovery_enabled', None), ('use_dualstack_endpoint', None), ('use_fips_endpoint', None), + ('ignore_configured_endpoint_urls', None), ('defaults_mode', None), ('tcp_keepalive', None), ] diff --git a/botocore/configloader.py b/botocore/configloader.py index 245d9d8eb7..0b6c82bcad 100644 --- a/botocore/configloader.py +++ b/botocore/configloader.py @@ -200,6 +200,17 @@ def _parse_nested(config_value): return parsed +def _parse_section(key, values): + result = {} + try: + parts = shlex.split(key) + except ValueError: + return result + if len(parts) == 2: + result[parts[1]] = values + return result + + def build_profile_map(parsed_ini_config): """Convert the parsed INI config into a profile map. @@ -254,22 +265,15 @@ def build_profile_map(parsed_ini_config): parsed_config = copy.deepcopy(parsed_ini_config) profiles = {} sso_sessions = {} + services = {} final_config = {} for key, values in parsed_config.items(): if key.startswith("profile"): - try: - parts = shlex.split(key) - except ValueError: - continue - if len(parts) == 2: - profiles[parts[1]] = values + profiles.update(_parse_section(key, values)) elif key.startswith("sso-session"): - try: - parts = shlex.split(key) - except ValueError: - continue - if len(parts) == 2: - sso_sessions[parts[1]] = values + sso_sessions.update(_parse_section(key, values)) + elif key.startswith("services"): + services.update(_parse_section(key, values)) elif key == 'default': # default section is special and is considered a profile # name but we don't require you use 'profile "default"' @@ -279,4 +283,5 @@ def build_profile_map(parsed_ini_config): final_config[key] = values final_config['profiles'] = profiles final_config['sso_sessions'] = sso_sessions + final_config['services'] = services return final_config diff --git a/botocore/configprovider.py b/botocore/configprovider.py index e2ebd5efe3..d7b2e19de8 100644 --- a/botocore/configprovider.py +++ b/botocore/configprovider.py @@ -18,6 +18,7 @@ import os from botocore import utils +from botocore.exceptions import InvalidConfigError logger = logging.getLogger(__name__) @@ -108,6 +109,12 @@ None, utils.ensure_boolean, ), + 'ignore_configured_endpoint_urls': ( + 'ignore_configured_endpoint_urls', + 'AWS_IGNORE_CONFIGURED_ENDPOINT_URLS', + None, + utils.ensure_boolean, + ), 'parameter_validation': ('parameter_validation', None, True, None), # Client side monitoring configurations. # Note: These configurations are considered internal to botocore. 
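A minimal sketch of how the new "services" sections are surfaced by the config loader; the profile name, section name, and URLs below are made up for illustration.

import tempfile
from botocore.configloader import load_config

CONFIG = """
[profile dev]
region = us-west-2
services = local-services
endpoint_url = http://localhost:8080

[services local-services]
s3 =
    endpoint_url = http://localhost:4566
"""

with tempfile.NamedTemporaryFile('w', suffix='.ini', delete=False) as f:
    f.write(CONFIG)
    path = f.name

parsed = load_config(path)
# 'services' is the new top-level key populated via _parse_section.
print(parsed['services'])         # {'local-services': {'s3': {'endpoint_url': 'http://localhost:4566'}}}
print(parsed['profiles']['dev'])  # contains services = local-services plus the global endpoint_url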
@@ -403,7 +410,18 @@ def __init__(self, mapping=None): self.set_config_provider(logical_name, provider) def __deepcopy__(self, memo): - return ConfigValueStore(copy.deepcopy(self._mapping, memo)) + config_store = ConfigValueStore(copy.deepcopy(self._mapping, memo)) + for logical_name, override_value in self._overrides.items(): + config_store.set_config_variable(logical_name, override_value) + + return config_store + + def __copy__(self): + config_store = ConfigValueStore(copy.copy(self._mapping)) + for logical_name, override_value in self._overrides.items(): + config_store.set_config_variable(logical_name, override_value) + + return config_store def get_config_variable(self, logical_name): """ @@ -543,24 +561,28 @@ def resolve_auto_mode(self, region_name): return 'standard' def _update_provider(self, config_store, variable, value): - provider = config_store.get_config_provider(variable) + original_provider = config_store.get_config_provider(variable) default_provider = ConstantProvider(value) - if isinstance(provider, ChainProvider): - provider.set_default_provider(default_provider) - return - elif isinstance(provider, BaseProvider): + if isinstance(original_provider, ChainProvider): + chain_provider_copy = copy.deepcopy(original_provider) + chain_provider_copy.set_default_provider(default_provider) + default_provider = chain_provider_copy + elif isinstance(original_provider, BaseProvider): default_provider = ChainProvider( - providers=[provider, default_provider] + providers=[original_provider, default_provider] ) config_store.set_config_provider(variable, default_provider) def _update_section_provider( self, config_store, section_name, variable, value ): - section_provider = config_store.get_config_provider(section_name) - section_provider.set_default_provider( + section_provider_copy = copy.deepcopy( + config_store.get_config_provider(section_name) + ) + section_provider_copy.set_default_provider( variable, ConstantProvider(value) ) + config_store.set_config_provider(section_name, section_provider_copy) def _set_retryMode(self, config_store, value): self._update_provider(config_store, 'retry_mode', value) @@ -837,3 +859,142 @@ def provide(self): def __repr__(self): return 'ConstantProvider(value=%s)' % self._value + + +class ConfiguredEndpointProvider(BaseProvider): + """Lookup an endpoint URL from environment variable or shared config file. + + NOTE: This class is considered private and is subject to abrupt breaking + changes or removal without prior announcement. Please do not use it + directly. + """ + + _ENDPOINT_URL_LOOKUP_ORDER = [ + 'environment_service', + 'environment_global', + 'config_service', + 'config_global', + ] + + def __init__( + self, + full_config, + scoped_config, + client_name, + environ=None, + ): + """Initialize a ConfiguredEndpointProviderChain. + + :type full_config: dict + :param full_config: This is the dict representing the full + configuration file. + + :type scoped_config: dict + :param scoped_config: This is the dict representing the configuration + for the current profile for the session. + + :type client_name: str + :param client_name: The name used to instantiate a client using + botocore.session.Session.create_client. + + :type environ: dict + :param environ: A mapping to use for environment variables. If this + is not provided it will default to use os.environ. 
+ """ + self._full_config = full_config + self._scoped_config = scoped_config + self._client_name = client_name + self._transformed_service_id = self._get_snake_case_service_id( + self._client_name + ) + if environ is None: + environ = os.environ + self._environ = environ + + def provide(self): + """Lookup the configured endpoint URL. + + The order is: + + 1. The value provided by a service-specific environment variable. + 2. The value provided by the global endpoint environment variable + (AWS_ENDPOINT_URL). + 3. The value provided by a service-specific parameter from a services + definition section in the shared configuration file. + 4. The value provided by the global parameter from a services + definition section in the shared configuration file. + """ + for location in self._ENDPOINT_URL_LOOKUP_ORDER: + logger.debug( + 'Looking for endpoint for %s via: %s', + self._client_name, + location, + ) + + endpoint_url = getattr(self, f'_get_endpoint_url_{location}')() + + if endpoint_url: + logger.info( + 'Found endpoint for %s via: %s.', + self._client_name, + location, + ) + return endpoint_url + + logger.debug('No configured endpoint found.') + return None + + def _get_snake_case_service_id(self, client_name): + # Get the service ID without loading the service data file, accounting + # for any aliases and standardizing the names with hyphens. + client_name = utils.SERVICE_NAME_ALIASES.get(client_name, client_name) + hyphenized_service_id = ( + utils.CLIENT_NAME_TO_HYPHENIZED_SERVICE_ID_OVERRIDES.get( + client_name, client_name + ) + ) + return hyphenized_service_id.replace('-', '_') + + def _get_service_env_var_name(self): + transformed_service_id_env = self._transformed_service_id.upper() + return f'AWS_ENDPOINT_URL_{transformed_service_id_env}' + + def _get_services_config(self): + if 'services' not in self._scoped_config: + return {} + + section_name = self._scoped_config['services'] + services_section = self._full_config.get('services', {}).get( + section_name + ) + + if not services_section: + error_msg = ( + f'The profile is configured to use the services ' + f'section but the "{section_name}" services ' + f'configuration does not exist.' + ) + raise InvalidConfigError(error_msg=error_msg) + + return services_section + + def _get_endpoint_url_config_service(self): + snakecase_service_id = self._transformed_service_id.lower() + return ( + self._get_services_config() + .get(snakecase_service_id, {}) + .get('endpoint_url') + ) + + def _get_endpoint_url_config_global(self): + return self._scoped_config.get('endpoint_url') + + def _get_endpoint_url_environment_service(self): + return EnvironmentProvider( + name=self._get_service_env_var_name(), env=self._environ + ).provide() + + def _get_endpoint_url_environment_global(self): + return EnvironmentProvider( + name='AWS_ENDPOINT_URL', env=self._environ + ).provide() diff --git a/botocore/data/ec2/2016-11-15/service-2.json b/botocore/data/ec2/2016-11-15/service-2.json index e3c1e8bb59..3a860699dc 100644 --- a/botocore/data/ec2/2016-11-15/service-2.json +++ b/botocore/data/ec2/2016-11-15/service-2.json @@ -5805,7 +5805,7 @@ }, "input":{"shape":"StopInstancesRequest"}, "output":{"shape":"StopInstancesResult"}, - "documentation":"

Stops an Amazon EBS-backed instance. For more information, see Stop and start your instance in the Amazon EC2 User Guide.

You can use the Stop action to hibernate an instance if the instance is enabled for hibernation and it meets the hibernation prerequisites. For more information, see Hibernate your instance in the Amazon EC2 User Guide.

We don't charge usage for a stopped instance, or data transfer fees; however, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. Every time you start your instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage.

You can't stop or hibernate instance store-backed instances. You can't use the Stop action to hibernate Spot Instances, but you can specify that Amazon EC2 should hibernate Spot Instances when they are interrupted. For more information, see Hibernating interrupted Spot Instances in the Amazon EC2 User Guide.

When you stop or hibernate an instance, we shut it down. You can restart your instance at any time. Before stopping or hibernating an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM, but hibernating an instance does preserve data stored in RAM. If an instance cannot hibernate successfully, a normal shutdown occurs.

Stopping and hibernating an instance is different to rebooting or terminating it. For example, when you stop or hibernate an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, the root device and any other devices attached during the instance launch are automatically deleted. For more information about the differences between rebooting, stopping, hibernating, and terminating instances, see Instance lifecycle in the Amazon EC2 User Guide.

When you stop an instance, we attempt to shut it down forcibly after a short while. If your instance appears stuck in the stopping state after a period of time, there may be an issue with the underlying host computer. For more information, see Troubleshoot stopping your instance in the Amazon EC2 User Guide.

" + "documentation":"

Stops an Amazon EBS-backed instance. For more information, see Stop and start your instance in the Amazon EC2 User Guide.

You can use the Stop action to hibernate an instance if the instance is enabled for hibernation and it meets the hibernation prerequisites. For more information, see Hibernate your instance in the Amazon EC2 User Guide.

We don't charge usage for a stopped instance, or data transfer fees; however, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. Every time you start your instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage.

You can't stop or hibernate instance store-backed instances. You can't use the Stop action to hibernate Spot Instances, but you can specify that Amazon EC2 should hibernate Spot Instances when they are interrupted. For more information, see Hibernating interrupted Spot Instances in the Amazon EC2 User Guide.

When you stop or hibernate an instance, we shut it down. You can restart your instance at any time. Before stopping or hibernating an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM, but hibernating an instance does preserve data stored in RAM. If an instance cannot hibernate successfully, a normal shutdown occurs.

Stopping and hibernating an instance is different to rebooting or terminating it. For example, when you stop or hibernate an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, the root device and any other devices attached during the instance launch are automatically deleted. For more information about the differences between rebooting, stopping, hibernating, and terminating instances, see Instance lifecycle in the Amazon EC2 User Guide.

When you stop an instance, we attempt to shut it down forcibly after a short while. If your instance appears stuck in the stopping state after a period of time, there may be an issue with the underlying host computer. For more information, see Troubleshoot stopping your instance in the Amazon EC2 User Guide.

" }, "TerminateClientVpnConnections":{ "name":"TerminateClientVpnConnections", @@ -30918,21 +30918,21 @@ "members":{ "Configured":{ "shape":"Boolean", - "documentation":"

If this parameter is set to true, your instance is enabled for hibernation; otherwise, it is not enabled for hibernation.

", + "documentation":"

If true, your instance is enabled for hibernation; otherwise, it is not enabled for hibernation.

", "locationName":"configured" } }, - "documentation":"

Indicates whether your instance is configured for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. For more information, see Hibernate your instance in the Amazon EC2 User Guide.

" + "documentation":"

Indicates whether your instance is configured for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. For more information, see Hibernate your instance in the Amazon EC2 User Guide.

" }, "HibernationOptionsRequest":{ "type":"structure", "members":{ "Configured":{ "shape":"Boolean", - "documentation":"

If you set this parameter to true, your instance is enabled for hibernation.

Default: false

" + "documentation":"

Set to true to enable your instance for hibernation.

Default: false

" } }, - "documentation":"

Indicates whether your instance is configured for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. For more information, see Hibernate your instance in the Amazon EC2 User Guide.

" + "documentation":"

Indicates whether your instance is configured for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. For more information, see Hibernate your instance in the Amazon EC2 User Guide.

" }, "HistoryRecord":{ "type":"structure", @@ -35406,6 +35406,11 @@ "shape":"BootModeTypeList", "documentation":"

The supported boot modes. For more information, see Boot modes in the Amazon EC2 User Guide.

", "locationName":"supportedBootModes" + }, + "NitroEnclavesSupport":{ + "shape":"NitroEnclavesSupport", + "documentation":"

Indicates whether Nitro Enclaves is supported.

", + "locationName":"nitroEnclavesSupport" } }, "documentation":"

Describes the instance type.

" @@ -43643,6 +43648,13 @@ } }, "NextToken":{"type":"string"}, + "NitroEnclavesSupport":{ + "type":"string", + "enum":[ + "unsupported", + "supported" + ] + }, "OccurrenceDayRequestSet":{ "type":"list", "member":{ @@ -44543,7 +44555,7 @@ }, "PlacementGroupArn":{ "type":"string", - "pattern":"^arn:aws([a-z-]+)?:ec2:[a-z\\d-]+:\\d{12}:placement-group/([^\\s].+[^\\s]){1,255}$" + "pattern":"^arn:aws([a-z-]+)?:ec2:[a-z\\d-]+:\\d{12}:placement-group/^.{1,255}$" }, "PlacementGroupId":{"type":"string"}, "PlacementGroupIdStringList":{ @@ -49124,7 +49136,7 @@ }, "HibernationOptions":{ "shape":"HibernationOptionsRequest", - "documentation":"

Indicates whether an instance is enabled for hibernation. For more information, see Hibernate your instance in the Amazon EC2 User Guide.

You can't enable hibernation and Amazon Web Services Nitro Enclaves on the same instance.

" + "documentation":"

Indicates whether an instance is enabled for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. For more information, see Hibernate your instance in the Amazon EC2 User Guide.

You can't enable hibernation and Amazon Web Services Nitro Enclaves on the same instance.

" }, "LicenseSpecifications":{ "shape":"LicenseSpecificationListRequest", @@ -49137,7 +49149,7 @@ }, "EnclaveOptions":{ "shape":"EnclaveOptionsRequest", - "documentation":"

Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. For more information, see What is Amazon Web Services Nitro Enclaves? in the Amazon Web Services Nitro Enclaves User Guide.

You can't enable Amazon Web Services Nitro Enclaves and hibernation on the same instance.

" + "documentation":"

Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. For more information, see What is Amazon Web Services Nitro Enclaves? in the Amazon Web Services Nitro Enclaves User Guide.

You can't enable Amazon Web Services Nitro Enclaves and hibernation on the same instance.

" }, "PrivateDnsNameOptions":{ "shape":"PrivateDnsNameOptionsRequest", @@ -52784,7 +52796,7 @@ "documentation":"

The ID of the Client VPN endpoint to which the client is connected.

" }, "ConnectionId":{ - "shape":"VpnConnectionId", + "shape":"String", "documentation":"

The ID of the client connection to be terminated.

" }, "Username":{ diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 4f3162af01..7a6715618e 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -10156,12 +10156,14 @@ }, "mediaconnect" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, "eu-west-1" : { }, diff --git a/botocore/data/location/2020-11-19/service-2.json b/botocore/data/location/2020-11-19/service-2.json index e58db3bed5..dd0bb40ca0 100644 --- a/botocore/data/location/2020-11-19/service-2.json +++ b/botocore/data/location/2020-11-19/service-2.json @@ -223,7 +223,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Creates an API key resource in your Amazon Web Services account, which lets you grant geo:GetMap* actions for Amazon Location Map resources to the API key bearer.

The API keys feature is in preview. We may add, change, or remove features before announcing general availability. For more information, see Using API keys.

", + "documentation":"

Creates an API key resource in your Amazon Web Services account, which lets you grant actions for Amazon Location resources to the API key bearer.

For more information, see Using API keys.

", "endpoint":{"hostPrefix":"metadata."}, "idempotent":true }, @@ -466,7 +466,7 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Retrieves the API key resource details.

The API keys feature is in preview. We may add, change, or remove features before announcing general availability. For more information, see Using API keys.

", + "documentation":"

Retrieves the API key resource details.

", "endpoint":{"hostPrefix":"metadata."} }, "DescribeMap":{ @@ -786,7 +786,7 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Lists API key resources in your Amazon Web Services account.

The API keys feature is in preview. We may add, change, or remove features before announcing general availability. For more information, see Using API keys.

", + "documentation":"

Lists API key resources in your Amazon Web Services account.

", "endpoint":{"hostPrefix":"metadata."} }, "ListMaps":{ @@ -1051,7 +1051,7 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Updates the specified properties of a given API key resource.

The API keys feature is in preview. We may add, change, or remove features before announcing general availability. For more information, see Using API keys.

", + "documentation":"

Updates the specified properties of a given API key resource.

", "endpoint":{"hostPrefix":"metadata."}, "idempotent":true }, @@ -1184,7 +1184,7 @@ "members":{ "AllowActions":{ "shape":"ApiKeyRestrictionsAllowActionsList", - "documentation":"

A list of allowed actions that an API key resource grants permissions to perform

Currently, the only valid action is geo:GetMap* as an input to the list. For example, [\"geo:GetMap*\"] is valid but [\"geo:GetMapTile\"] is not.

" + "documentation":"

A list of allowed actions that an API key resource grants permissions to perform. You must have at least one action for each type of resource. For example, if you have a place resource, you must include at least one place action.

The following are valid values for the actions.

You must use these strings exactly. For example, to provide access to map rendering, the only valid action is geo:GetMap* as an input to the list. [\"geo:GetMap*\"] is valid but [\"geo:GetMapTile\"] is not. Similarly, you cannot use [\"geo:SearchPlaceIndexFor*\"] - you must list each of the Place actions separately.

" }, "AllowReferers":{ "shape":"ApiKeyRestrictionsAllowReferersList", @@ -1192,7 +1192,7 @@ }, "AllowResources":{ "shape":"ApiKeyRestrictionsAllowResourcesList", - "documentation":"

A list of allowed resource ARNs that a API key bearer can perform actions on

For more information about ARN format, see Amazon Resource Names (ARNs).

In this preview, you can allow only map resources.

Requirements:

" + "documentation":"

A list of allowed resource ARNs that an API key bearer can perform actions on.

For more information about ARN format, see Amazon Resource Names (ARNs).

" } }, "documentation":"

API Restrictions on the allowed actions, resources, and referers for an API key resource.

" @@ -1576,7 +1576,7 @@ }, "GeofenceProperties":{ "shape":"PropertyMap", - "documentation":"

Specifies additional user-defined properties to store with the Geofence. An array of key-value pairs.

" + "documentation":"

Associates one or more properties with the geofence. A property is a key-value pair stored with the geofence and added to any geofence event triggered with that geofence.

Format: \"key\" : \"value\"

" }, "Geometry":{ "shape":"GeofenceGeometry", @@ -1755,6 +1755,12 @@ "shape":"DistanceUnit", "documentation":"

Set the unit system to specify the distance.

Default Value: Kilometers

" }, + "Key":{ + "shape":"ApiKey", + "documentation":"

The optional API key to authorize the request.

", + "location":"querystring", + "locationName":"key" + }, "TravelMode":{ "shape":"TravelMode", "documentation":"

Specifies the mode of transport when calculating a route. Used in estimating the speed of travel and road compatibility.

The TravelMode you specify also determines how you specify route preferences:

Bicycle or Motorcycle are only valid when using Grab as a data provider, and only within Southeast Asia.

Truck is not available for Grab.

For more information about using Grab as a data provider, see GrabMaps in the Amazon Location Service Developer Guide.

Default Value: Car

" @@ -1897,6 +1903,12 @@ "shape":"Boolean", "documentation":"

Set to include the geometry details in the result for each path between a pair of positions.

Default Value: false

Valid Values: false | true

" }, + "Key":{ + "shape":"ApiKey", + "documentation":"

The optional API key to authorize the request.

", + "location":"querystring", + "locationName":"key" + }, "TravelMode":{ "shape":"TravelMode", "documentation":"

Specifies the mode of transport when calculating a route. Used in estimating the speed of travel and road compatibility. You can choose Car, Truck, Walking, Bicycle or Motorcycle as options for the TravelMode.

Bicycle and Motorcycle are only valid when using Grab as a data provider, and only within Southeast Asia.

Truck is not available for Grab.

For more details on using Grab for routing, including areas of coverage, see GrabMaps in the Amazon Location Service Developer Guide.

The TravelMode you specify also determines how you specify route preferences:

Default Value: Car

" @@ -2344,6 +2356,10 @@ "shape":"ResourceDescription", "documentation":"

An optional description for the tracker resource.

" }, + "EventBridgeEnabled":{ + "shape":"Boolean", + "documentation":"

Whether to enable position UPDATE events from this tracker to be sent to EventBridge.

You do not need to enable this feature to get ENTER and EXIT events for geofences with this tracker. Those events are always sent to EventBridge.

" + }, "KmsKeyId":{ "shape":"KmsKeyId", "documentation":"

A key identifier for an Amazon Web Services KMS customer managed key. Enter a key ID, key ARN, alias name, or alias ARN.

" @@ -2848,6 +2864,10 @@ "shape":"ResourceDescription", "documentation":"

The optional description for the tracker resource.

" }, + "EventBridgeEnabled":{ + "shape":"Boolean", + "documentation":"

Whether UPDATE events from this tracker in EventBridge are enabled. If set to true, these events will be sent to EventBridge.

" + }, "KmsKeyId":{ "shape":"KmsKeyId", "documentation":"

A key identifier for an Amazon Web Services KMS customer managed key assigned to the Amazon Location resource.

" @@ -3179,7 +3199,7 @@ }, "GeofenceProperties":{ "shape":"PropertyMap", - "documentation":"

Contains additional user-defined properties stored with the geofence. An array of key-value pairs.

" + "documentation":"

User-defined properties of the geofence. A property is a key-value pair stored with the geofence and added to any geofence event triggered with that geofence.

Format: \"key\" : \"value\"

" }, "Geometry":{ "shape":"GeofenceGeometry", @@ -3436,6 +3456,12 @@ "location":"uri", "locationName":"IndexName" }, + "Key":{ + "shape":"ApiKey", + "documentation":"

The optional API key to authorize the request.

", + "location":"querystring", + "locationName":"key" + }, "Language":{ "shape":"LanguageTag", "documentation":"

The preferred language used to return results. The value must be a valid BCP 47 language tag, for example, en for English.

This setting affects the languages used in the results, but not the results themselves. If no language is specified, or not supported for a particular result, the partner automatically chooses a language for the result.

For an example, we'll use the Greek language. You search for a location around Athens, Greece, with the language parameter set to en. The city in the results will most likely be returned as Athens.

If you set the language parameter to el, for Greek, then the city in the results will more likely be returned as Αθήνα.

If the data provider does not have a value for Greek, the result will be in a language that the provider does support.

", @@ -3750,7 +3776,7 @@ }, "GeofenceProperties":{ "shape":"PropertyMap", - "documentation":"

Contains additional user-defined properties stored with the geofence. An array of key-value pairs.

" + "documentation":"

User-defined properties of the geofence. A property is a key-value pair stored with the geofence and added to any geofence event triggered with that geofence.

Format: \"key\" : \"value\"

" }, "Geometry":{ "shape":"GeofenceGeometry", @@ -4469,7 +4495,7 @@ }, "GeofenceProperties":{ "shape":"PropertyMap", - "documentation":"

Specifies additional user-defined properties to store with the Geofence. An array of key-value pairs.

" + "documentation":"

Associates one or more properties with the geofence. A property is a key-value pair stored with the geofence and added to any geofence event triggered with that geofence.

Format: \"key\" : \"value\"

" }, "Geometry":{ "shape":"GeofenceGeometry", @@ -4703,6 +4729,12 @@ "location":"uri", "locationName":"IndexName" }, + "Key":{ + "shape":"ApiKey", + "documentation":"

The optional API key to authorize the request.

", + "location":"querystring", + "locationName":"key" + }, "Language":{ "shape":"LanguageTag", "documentation":"

The preferred language used to return results. The value must be a valid BCP 47 language tag, for example, en for English.

This setting affects the languages used in the results, but not the results themselves. If no language is specified, or not supported for a particular result, the partner automatically chooses a language for the result.

For an example, we'll use the Greek language. You search for a location around Athens, Greece, with the language parameter set to en. The city in the results will most likely be returned as Athens.

If you set the language parameter to el, for Greek, then the city in the results will more likely be returned as Αθήνα.

If the data provider does not have a value for Greek, the result will be in a language that the provider does support.

" @@ -4789,6 +4821,12 @@ "location":"uri", "locationName":"IndexName" }, + "Key":{ + "shape":"ApiKey", + "documentation":"

The optional API key to authorize the request.

", + "location":"querystring", + "locationName":"key" + }, "Language":{ "shape":"LanguageTag", "documentation":"

The preferred language used to return results. The value must be a valid BCP 47 language tag, for example, en for English.

This setting affects the languages used in the results. If no language is specified, or not supported for a particular result, the partner automatically chooses a language for the result.

For an example, we'll use the Greek language. You search for Athens, Gr to get suggestions with the language parameter set to en. The results found will most likely be returned as Athens, Greece.

If you set the language parameter to el, for Greek, then the result found will more likely be returned as Αθήνα, Ελλάδα.

If the data provider does not have a value for Greek, the result will be in a language that the provider does support.

" @@ -4903,6 +4941,12 @@ "location":"uri", "locationName":"IndexName" }, + "Key":{ + "shape":"ApiKey", + "documentation":"

The optional API key to authorize the request.

", + "location":"querystring", + "locationName":"key" + }, "Language":{ "shape":"LanguageTag", "documentation":"

The preferred language used to return results. The value must be a valid BCP 47 language tag, for example, en for English.

This setting affects the languages used in the results, but not the results themselves. If no language is specified, or not supported for a particular result, the partner automatically chooses a language for the result.

For an example, we'll use the Greek language. You search for Athens, Greece, with the language parameter set to en. The result found will most likely be returned as Athens.

If you set the language parameter to el, for Greek, then the result found will more likely be returned as Αθήνα.

If the data provider does not have a value for Greek, the result will be in a language that the provider does support.

" @@ -5500,6 +5544,10 @@ "shape":"ResourceDescription", "documentation":"

Updates the description for the tracker resource.

" }, + "EventBridgeEnabled":{ + "shape":"Boolean", + "documentation":"

Whether to enable position UPDATE events from this tracker to be sent to EventBridge.

You do not need to enable this feature to get ENTER and EXIT events for geofences with this tracker. Those events are always sent to EventBridge.

" + }, "PositionFiltering":{ "shape":"PositionFiltering", "documentation":"

Updates the position filtering for the tracker resource.

Valid values:

" diff --git a/botocore/data/outposts/2019-12-03/endpoint-rule-set-1.json b/botocore/data/outposts/2019-12-03/endpoint-rule-set-1.json index 5f12e35f84..fb8b6059a9 100644 --- a/botocore/data/outposts/2019-12-03/endpoint-rule-set-1.json +++ b/botocore/data/outposts/2019-12-03/endpoint-rule-set-1.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -131,179 +111,240 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, + }, { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsDualStack" + true ] } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], + ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://outposts-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, { "conditions": [], - "endpoint": { - "url": "https://outposts-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - 
"properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true ] } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "stringEquals", + "fn": "booleanEquals", "argv": [ - "aws-us-gov", + true, { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "name" + "supportsFIPS" ] } ] } ], - "endpoint": { - "url": "https://outposts.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws-us-gov", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://outposts.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://outposts-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] }, { "conditions": [], - "endpoint": { - "url": "https://outposts-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsDualStack" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://outposts.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -311,7 +352,7 @@ { "conditions": [], "endpoint": { - "url": "https://outposts.{Region}.{PartitionResult#dualStackDnsSuffix}", + "url": "https://outposts.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -320,66 +361,13 @@ ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition 
does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "us-gov-east-1" - ] - } - ], - "endpoint": { - "url": "https://outposts.us-gov-east-1.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "us-gov-west-1" - ] - } - ], - "endpoint": { - "url": "https://outposts.us-gov-west-1.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://outposts.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/botocore/data/outposts/2019-12-03/paginators-1.json b/botocore/data/outposts/2019-12-03/paginators-1.json index ea142457a6..3641155079 100644 --- a/botocore/data/outposts/2019-12-03/paginators-1.json +++ b/botocore/data/outposts/2019-12-03/paginators-1.json @@ -1,3 +1,40 @@ { - "pagination": {} + "pagination": { + "GetOutpostInstanceTypes": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "InstanceTypes" + }, + "ListAssets": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Assets" + }, + "ListCatalogItems": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "CatalogItems" + }, + "ListOrders": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Orders" + }, + "ListOutposts": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Outposts" + }, + "ListSites": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Sites" + } + } } diff --git a/botocore/data/outposts/2019-12-03/paginators-1.sdk-extras.json b/botocore/data/outposts/2019-12-03/paginators-1.sdk-extras.json new file mode 100644 index 0000000000..f13d39be68 --- /dev/null +++ b/botocore/data/outposts/2019-12-03/paginators-1.sdk-extras.json @@ -0,0 +1,13 @@ +{ + "version": 1.0, + "merge": { + "pagination": { + "GetOutpostInstanceTypes": { + "non_aggregate_keys": [ + "OutpostArn", + "OutpostId" + ] + } + } + } +} diff --git a/botocore/data/outposts/2019-12-03/service-2.json b/botocore/data/outposts/2019-12-03/service-2.json index 983bc1cab8..e5107dffb3 100644 --- a/botocore/data/outposts/2019-12-03/service-2.json +++ b/botocore/data/outposts/2019-12-03/service-2.json @@ -588,7 +588,8 @@ "type":"string", "enum":[ "ACTIVE", - "RETIRING" + "RETIRING", + "ISOLATED" ] }, "AssetType":{ @@ -1261,7 +1262,7 @@ }, "PreviousOrderId":{ "shape":"OrderId", - "documentation":"

The ID of the previous order.

" + "documentation":"

The ID of the previous order item.

" } }, "documentation":"

Information about a line item.

" @@ -1658,7 +1659,7 @@ }, "OrderType":{ "shape":"OrderType", - "documentation":"

The type of order.

" + "documentation":"

Type of order.

" } }, "documentation":"

Information about an order.

" @@ -1697,7 +1698,7 @@ }, "OrderType":{ "shape":"OrderType", - "documentation":"

The type of order.

" + "documentation":"

The type of order.

" }, "Status":{ "shape":"OrderStatus", @@ -2083,7 +2084,7 @@ "StatusList":{ "type":"list", "member":{"shape":"AssetState"}, - "max":2, + "max":3, "min":1 }, "String":{ diff --git a/botocore/data/quicksight/2018-04-01/service-2.json b/botocore/data/quicksight/2018-04-01/service-2.json index 704e0bad91..db2b5d68fa 100644 --- a/botocore/data/quicksight/2018-04-01/service-2.json +++ b/botocore/data/quicksight/2018-04-01/service-2.json @@ -2364,7 +2364,7 @@ {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Starts an Asset Bundle export job.

An Asset Bundle export job exports specified Amazon QuickSight assets. You can also choose to export any asset dependencies in the same job. Export jobs run asynchronously and can be polled with a DescribeAssetBundleExportJob API call. When a job is successfully completed, a download URL that contains the exported assets is returned. The URL is valid for 5 minutes and can be refreshed with a DescribeAssetBundleExportJob API call. Each Amazon QuickSight account can run up to 10 export jobs concurrently.

The API caller must have the necessary permissions in their IAM role to access each resource before the resources can be exported.

" + "documentation":"

Starts an Asset Bundle export job.

An Asset Bundle export job exports specified Amazon QuickSight assets. You can also choose to export any asset dependencies in the same job. Export jobs run asynchronously and can be polled with a DescribeAssetBundleExportJob API call. When a job is successfully completed, a download URL that contains the exported assets is returned. The URL is valid for 5 minutes and can be refreshed with a DescribeAssetBundleExportJob API call. Each Amazon QuickSight account can run up to 5 export jobs concurrently.

The API caller must have the necessary permissions in their IAM role to access each resource before the resources can be exported.

" }, "StartAssetBundleImportJob":{ "name":"StartAssetBundleImportJob", @@ -2383,7 +2383,7 @@ {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Starts an Asset Bundle import job.

An Asset Bundle import job imports specified Amazon QuickSight assets into an Amazon QuickSight account. You can also choose to import a naming prefix and specified configuration overrides. The assets that are contained in the bundle file that you provide are used to create or update a new or existing asset in your Amazon QuickSight account. Each Amazon QuickSight account can run up to 10 import jobs concurrently.

The API caller must have the necessary \"create\", \"describe\", and \"update\" permissions in their IAM role to access each resource type that is contained in the bundle file before the resources can be imported.

" + "documentation":"

Starts an Asset Bundle import job.

An Asset Bundle import job imports specified Amazon QuickSight assets into an Amazon QuickSight account. You can also choose to import a naming prefix and specified configuration overrides. The assets that are contained in the bundle file that you provide are used to create or update a new or existing asset in your Amazon QuickSight account. Each Amazon QuickSight account can run up to 5 import jobs concurrently.

The API caller must have the necessary \"create\", \"describe\", and \"update\" permissions in their IAM role to access each resource type that is contained in the bundle file before the resources can be imported.

" }, "TagResource":{ "name":"TagResource", @@ -3123,8 +3123,7 @@ "type":"structure", "required":[ "Column", - "SortDirection", - "AggregationFunction" + "SortDirection" ], "members":{ "Column":{ @@ -4258,7 +4257,7 @@ "documentation":"

The Amazon S3 URI for an asset bundle import file that exists in an Amazon S3 bucket that the caller has read access to. The file must be a zip format file and can't exceed 20 MB.

" } }, - "documentation":"

The source of the asset bundle zip file that contains the data that you want to import.

" + "documentation":"

The source of the asset bundle zip file that contains the data that you want to import. The file must be in QUICKSIGHT_JSON format.

" }, "AssetBundleImportSourceDescription":{ "type":"structure", @@ -5490,6 +5489,16 @@ "max":3, "min":2 }, + "ColorsConfiguration":{ + "type":"structure", + "members":{ + "CustomColors":{ + "shape":"CustomColorsList", + "documentation":"

A list of up to 50 custom colors.

" + } + }, + "documentation":"

The color configurations for a column.

" + }, "ColumnConfiguration":{ "type":"structure", "required":["Column"], @@ -5505,6 +5514,10 @@ "Role":{ "shape":"ColumnRole", "documentation":"

The role of the column.

" + }, + "ColorsConfiguration":{ + "shape":"ColorsConfiguration", + "documentation":"

The color configurations of the column.

" } }, "documentation":"

The general configuration of a column.

" @@ -7808,6 +7821,30 @@ }, "documentation":"

The URL operation that opens a link to another webpage.

" }, + "CustomColor":{ + "type":"structure", + "required":["Color"], + "members":{ + "FieldValue":{ + "shape":"FieldValue", + "documentation":"

The data value that the color is applied to.

" + }, + "Color":{ + "shape":"HexColor", + "documentation":"

The color that is applied to the data value.

" + }, + "SpecialValue":{ + "shape":"SpecialValue", + "documentation":"

The value of a special data value.

" + } + }, + "documentation":"

Determines the color that's applied to a particular data value in a column.

" + }, + "CustomColorsList":{ + "type":"list", + "member":{"shape":"CustomColor"}, + "max":50 + }, "CustomContentConfiguration":{ "type":"structure", "members":{ @@ -11335,7 +11372,7 @@ }, "ExportFormat":{ "shape":"AssetBundleExportFormat", - "documentation":"

The format of the export.

" + "documentation":"

The format of the exported asset bundle. A QUICKSIGHT_JSON formatted file can be used to make a StartAssetBundleImportJob API call. A CLOUDFORMATION_JSON formatted file can be used in the CloudFormation console and with the CloudFormation APIs.

" }, "CloudFormationOverridePropertyConfiguration":{ "shape":"AssetBundleCloudFormationOverridePropertyConfiguration", @@ -23865,6 +23902,34 @@ }, "documentation":"

The display options of a control.

" }, + "SmallMultiplesAxisPlacement":{ + "type":"string", + "enum":[ + "OUTSIDE", + "INSIDE" + ] + }, + "SmallMultiplesAxisProperties":{ + "type":"structure", + "members":{ + "Scale":{ + "shape":"SmallMultiplesAxisScale", + "documentation":"

Determines whether the scale of the axes is shared or independent. The default value is SHARED.

" + }, + "Placement":{ + "shape":"SmallMultiplesAxisPlacement", + "documentation":"

Defines the placement of the axis. By default, axes are rendered OUTSIDE of the panels. Axes with INDEPENDENT scale are rendered INSIDE the panels.

" + } + }, + "documentation":"

Configures the properties of a chart's axes that are used by small multiples panels.

" + }, + "SmallMultiplesAxisScale":{ + "type":"string", + "enum":[ + "SHARED", + "INDEPENDENT" + ] + }, "SmallMultiplesDimensionFieldList":{ "type":"list", "member":{"shape":"DimensionField"}, @@ -23884,6 +23949,14 @@ "PanelConfiguration":{ "shape":"PanelConfiguration", "documentation":"

Configures the display options for each small multiples panel.

" + }, + "XAxis":{ + "shape":"SmallMultiplesAxisProperties", + "documentation":"

The properties of a small multiples X axis.

" + }, + "YAxis":{ + "shape":"SmallMultiplesAxisProperties", + "documentation":"

The properties of a small multiples Y axis.

" } }, "documentation":"

Options that determine the layout and display options of a chart's small multiples.

" @@ -23958,6 +24031,14 @@ }, "documentation":"

The parameters for Spark.

" }, + "SpecialValue":{ + "type":"string", + "enum":[ + "EMPTY", + "NULL", + "OTHER" + ] + }, "SqlEndpointPath":{ "type":"string", "max":4096, @@ -24080,7 +24161,7 @@ }, "AssetBundleImportSource":{ "shape":"AssetBundleImportSource", - "documentation":"

The source of the asset bundle zip file that contains the data that you want to import.

" + "documentation":"

The source of the asset bundle zip file that contains the data that you want to import. The file must be in QUICKSIGHT_JSON format.
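A hedged sketch of the export/import pairing described here: an export job whose ExportFormat is QUICKSIGHT_JSON produces a bundle that can be supplied as the AssetBundleImportSource of an import job. ExportFormat, AssetBundleImportSource, and the QUICKSIGHT_JSON value come from this model; the S3Uri member name and all other request fields (account ID, job IDs, resource ARNs) are assumptions or omitted.

# Hypothetical request fragments only, not complete API calls.
export_job_fragment = {"ExportFormat": "QUICKSIGHT_JSON"}
import_job_fragment = {
    "AssetBundleImportSource": {
        "S3Uri": "s3://my-bucket/asset-bundle.qs"  # assumed member name, placeholder URI
    }
}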

" }, "OverrideParameters":{ "shape":"AssetBundleImportJobOverrideParameters", diff --git a/botocore/data/rds/2014-10-31/service-2.json b/botocore/data/rds/2014-10-31/service-2.json index 5468f5d0d8..d2b0ff2172 100644 --- a/botocore/data/rds/2014-10-31/service-2.json +++ b/botocore/data/rds/2014-10-31/service-2.json @@ -604,7 +604,7 @@ {"shape":"InvalidDBClusterStateFault"}, {"shape":"DBClusterNotFoundFault"} ], - "documentation":"

Creates an Aurora global database spread across multiple Amazon Web Services Regions. The global database contains a single primary cluster with read-write capability, and a read-only secondary cluster that receives data from the primary cluster through high-speed replication performed by the Aurora storage subsystem.

You can create a global database that is initially empty, and then add a primary cluster and a secondary cluster to it. Or you can specify an existing Aurora cluster during the create operation, and this cluster becomes the primary cluster of the global database.

This action applies only to Aurora DB clusters.

" + "documentation":"

Creates an Aurora global database spread across multiple Amazon Web Services Regions. The global database contains a single primary cluster with read-write capability, and a read-only secondary cluster that receives data from the primary cluster through high-speed replication performed by the Aurora storage subsystem.

You can create a global database that is initially empty, and then add a primary cluster and a secondary cluster to it. Or you can specify an existing Aurora cluster during the create operation, and this cluster becomes the primary cluster of the global database.

This operation applies only to Aurora DB clusters.

" }, "CreateOptionGroup":{ "name":"CreateOptionGroup", @@ -3943,7 +3943,7 @@ "members":{ "DBName":{ "shape":"String", - "documentation":"

The meaning of this parameter differs depending on the database engine.

Amazon Aurora MySQL
Amazon Aurora PostgreSQL
Amazon RDS Custom for Oracle
Amazon RDS Custom for SQL Server
RDS for MariaDB
RDS for MySQL
RDS for Oracle
RDS for PostgreSQL
RDS for SQL Server

The name of the database to create when the primary DB instance of the Aurora MySQL DB cluster is created. If you don't specify a value, Amazon RDS doesn't create a database in the DB cluster.

Constraints:

The name of the database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is created.

Default: postgres

Constraints:

The Oracle System ID (SID) of the created RDS Custom DB instance.

Default: ORCL

Constraints:

Not applicable. Must be null.

The name of the database to create when the DB instance is created. If you don't specify a value, Amazon RDS doesn't create a database in the DB instance.

Constraints:

The name of the database to create when the DB instance is created. If you don't specify a value, Amazon RDS doesn't create a database in the DB instance.

Constraints:

The Oracle System ID (SID) of the created DB instance.

Default: ORCL

Constraints:

The name of the database to create when the DB instance is created.

Default: postgres

Constraints:

Not applicable. Must be null.

" + "documentation":"

The meaning of this parameter differs depending on the database engine.

Amazon Aurora MySQL

The name of the database to create when the primary DB instance of the Aurora MySQL DB cluster is created. If you don't specify a value, Amazon RDS doesn't create a database in the DB cluster.

Constraints:

Amazon Aurora PostgreSQL

The name of the database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is created.

Default: postgres

Constraints:

Amazon RDS Custom for Oracle

The Oracle System ID (SID) of the created RDS Custom DB instance.

Default: ORCL

Constraints:

Amazon RDS Custom for SQL Server

Not applicable. Must be null.

RDS for MariaDB

The name of the database to create when the DB instance is created. If you don't specify a value, Amazon RDS doesn't create a database in the DB instance.

Constraints:

RDS for MySQL

The name of the database to create when the DB instance is created. If you don't specify a value, Amazon RDS doesn't create a database in the DB instance.

Constraints:

RDS for Oracle

The Oracle System ID (SID) of the created DB instance.

Default: ORCL

Constraints:

RDS for PostgreSQL

The name of the database to create when the DB instance is created.

Default: postgres

Constraints:

RDS for SQL Server

Not applicable. Must be null.

" }, "DBInstanceIdentifier":{ "shape":"String", @@ -3951,7 +3951,7 @@ }, "AllocatedStorage":{ "shape":"IntegerOptional", - "documentation":"

The amount of storage in gibibytes (GiB) to allocate for the DB instance.

This setting doesn't apply to Amazon Aurora DB instances. Aurora cluster volumes automatically grow as the amount of data in your database increases, though you are only charged for the space that you use in an Aurora cluster volume.

Amazon RDS Custom
RDS for MariaDB
RDS for MySQL
RDS for Oracle
RDS for PostgreSQL
RDS for SQL Server

Constraints to the amount of storage for each storage type are the following:

Constraints to the amount of storage for each storage type are the following:

Constraints to the amount of storage for each storage type are the following:

Constraints to the amount of storage for each storage type are the following:

Constraints to the amount of storage for each storage type are the following:

Constraints to the amount of storage for each storage type are the following:

" + "documentation":"

The amount of storage in gibibytes (GiB) to allocate for the DB instance.

This setting doesn't apply to Amazon Aurora DB instances. Aurora cluster volumes automatically grow as the amount of data in your database increases, though you are only charged for the space that you use in an Aurora cluster volume.

Amazon RDS Custom

Constraints to the amount of storage for each storage type are the following:

RDS for MariaDB

Constraints to the amount of storage for each storage type are the following:

RDS for MySQL

Constraints to the amount of storage for each storage type are the following:

RDS for Oracle

Constraints to the amount of storage for each storage type are the following:

RDS for PostgreSQL

Constraints to the amount of storage for each storage type are the following:

RDS for SQL Server

Constraints to the amount of storage for each storage type are the following:

" }, "DBInstanceClass":{ "shape":"String", @@ -4011,7 +4011,7 @@ }, "EngineVersion":{ "shape":"String", - "documentation":"

The version number of the database engine to use.

This setting doesn't apply to Amazon Aurora DB instances. The version number of the database engine the DB instance uses is managed by the DB cluster.

For a list of valid engine versions, use the DescribeDBEngineVersions operation.

The following are the database engines and links to information about the major and minor versions that are available with Amazon RDS. Not every database engine is available for every Amazon Web Services Region.

Amazon RDS Custom for Oracle
Amazon RDS Custom for SQL Server
RDS for MariaDB
RDS for Microsoft SQL Server
RDS for MySQL
RDS for Oracle
RDS for PostgreSQL

A custom engine version (CEV) that you have previously created. This setting is required for RDS Custom for Oracle. The CEV name has the following format: 19.customized_string. A valid CEV name is 19.my_cev1. For more information, see Creating an RDS Custom for Oracle DB instance in the Amazon RDS User Guide.

See RDS Custom for SQL Server general requirements in the Amazon RDS User Guide.

For information, see MariaDB on Amazon RDS versions in the Amazon RDS User Guide.

For information, see Microsoft SQL Server versions on Amazon RDS in the Amazon RDS User Guide.

For information, see MySQL on Amazon RDS versions in the Amazon RDS User Guide.

For information, see Oracle Database Engine release notes in the Amazon RDS User Guide.

For information, see Amazon RDS for PostgreSQL versions and extensions in the Amazon RDS User Guide.

" + "documentation":"

The version number of the database engine to use.

This setting doesn't apply to Amazon Aurora DB instances. The version number of the database engine the DB instance uses is managed by the DB cluster.

For a list of valid engine versions, use the DescribeDBEngineVersions operation.

The following are the database engines and links to information about the major and minor versions that are available with Amazon RDS. Not every database engine is available for every Amazon Web Services Region.

Amazon RDS Custom for Oracle

A custom engine version (CEV) that you have previously created. This setting is required for RDS Custom for Oracle. The CEV name has the following format: 19.customized_string. A valid CEV name is 19.my_cev1. For more information, see Creating an RDS Custom for Oracle DB instance in the Amazon RDS User Guide.

Amazon RDS Custom for SQL Server

See RDS Custom for SQL Server general requirements in the Amazon RDS User Guide.

RDS for MariaDB

For information, see MariaDB on Amazon RDS versions in the Amazon RDS User Guide.

RDS for Microsoft SQL Server

For information, see Microsoft SQL Server versions on Amazon RDS in the Amazon RDS User Guide.

RDS for MySQL

For information, see MySQL on Amazon RDS versions in the Amazon RDS User Guide.

RDS for Oracle

For information, see Oracle Database Engine release notes in the Amazon RDS User Guide.

RDS for PostgreSQL

For information, see Amazon RDS for PostgreSQL versions and extensions in the Amazon RDS User Guide.
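The EngineVersion documentation above defers to the DescribeDBEngineVersions operation for the list of valid versions. A minimal botocore sketch of that lookup follows; the region and engine name are placeholders.

import botocore.session

# Look up valid engine versions, as the documentation above suggests.
session = botocore.session.get_session()
rds = session.create_client('rds', region_name='us-east-1')  # placeholder region
response = rds.describe_db_engine_versions(Engine='postgres')  # placeholder engine
for version in response['DBEngineVersions']:
    print(version['Engine'], version['EngineVersion'])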

" }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", @@ -4075,15 +4075,15 @@ }, "DomainFqdn":{ "shape":"String", - "documentation":"

Specifies the fully qualified domain name of an Active Directory domain.

Constraints:

Example: mymanagedADtest.mymanagedAD.mydomain

" + "documentation":"

The fully qualified domain name (FQDN) of an Active Directory domain.

Constraints:

Example: mymanagedADtest.mymanagedAD.mydomain

" }, "DomainOu":{ "shape":"String", - "documentation":"

The Active Directory organizational unit for your DB instance to join.

Constraints:

Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain

" + "documentation":"

The Active Directory organizational unit for your DB instance to join.

Constraints:

Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain

" }, "DomainAuthSecretArn":{ "shape":"String", - "documentation":"

The ARN for the Secrets Manager secret that contains the credentials for the user performing the domain join.

Example: arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456

" + "documentation":"

The ARN for the Secrets Manager secret with the credentials for the user joining the domain.

Example: arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456
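A minimal sketch, in Python, of passing the self-managed Active Directory parameters documented above to CreateDBInstance. Only the Domain* parameter names come from this model; every value is a placeholder, and other request fields a real call would need (master user settings, networking, and so on) are omitted.

import botocore.session

session = botocore.session.get_session()
rds = session.create_client('rds', region_name='us-east-1')  # placeholder region

# Hypothetical values; parameter names mirror the members shown in this diff.
rds.create_db_instance(
    DBInstanceIdentifier='my-sqlserver-instance',
    DBInstanceClass='db.m5.large',
    Engine='sqlserver-se',
    AllocatedStorage=100,
    DomainFqdn='mymanagedADtest.mymanagedAD.mydomain',
    DomainOu='OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain',
    DomainAuthSecretArn='arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456',
    DomainDnsIps=['10.0.0.10', '10.0.0.11'],
)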

" }, "DomainDnsIps":{ "shape":"StringList", @@ -4091,7 +4091,7 @@ }, "CopyTagsToSnapshot":{ "shape":"BooleanOptional", - "documentation":"

Spcifies whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.

This setting doesn't apply to Amazon Aurora DB instances. Copying tags to snapshots is managed by the DB cluster. Setting this value for an Aurora DB instance has no effect on the DB cluster setting.

" + "documentation":"

Specifies whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.

This setting doesn't apply to Amazon Aurora DB instances. Copying tags to snapshots is managed by the DB cluster. Setting this value for an Aurora DB instance has no effect on the DB cluster setting.

" }, "MonitoringInterval":{ "shape":"IntegerOptional", @@ -4303,15 +4303,15 @@ }, "DomainFqdn":{ "shape":"String", - "documentation":"

Specifies the fully qualified domain name of an Active Directory domain.

Constraints:

Example: mymanagedADtest.mymanagedAD.mydomain

" + "documentation":"

The fully qualified domain name (FQDN) of an Active Directory domain.

Constraints:

Example: mymanagedADtest.mymanagedAD.mydomain

" }, "DomainOu":{ "shape":"String", - "documentation":"

The Active Directory organizational unit for your DB instance to join.

Constraints:

Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain

" + "documentation":"

The Active Directory organizational unit for your DB instance to join.

Constraints:

Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain

" }, "DomainAuthSecretArn":{ "shape":"String", - "documentation":"

The ARN for the Secrets Manager secret that contains the credentials for the user performing the domain join.

Example: arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456

" + "documentation":"

The ARN for the Secrets Manager secret with the credentials for the user joining the domain.

Example: arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456

" }, "DomainDnsIps":{ "shape":"StringList", @@ -4629,31 +4629,31 @@ "members":{ "GlobalClusterIdentifier":{ "shape":"String", - "documentation":"

The cluster identifier of the new global database cluster. This parameter is stored as a lowercase string.

" + "documentation":"

The cluster identifier for this global database cluster. This parameter is stored as a lowercase string.

" }, "SourceDBClusterIdentifier":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) to use as the primary cluster of the global database. This parameter is optional.

" + "documentation":"

The Amazon Resource Name (ARN) to use as the primary cluster of the global database.

If you provide a value for this parameter, don't specify values for the following settings because Amazon Aurora uses the values from the specified source DB cluster:

" }, "Engine":{ "shape":"String", - "documentation":"

The name of the database engine to be used for this DB cluster.

" + "documentation":"

The database engine to use for this global database cluster.

Valid Values: aurora-mysql | aurora-postgresql

Constraints:

" }, "EngineVersion":{ "shape":"String", - "documentation":"

The engine version of the Aurora global database.

" + "documentation":"

The engine version to use for this global database cluster.

Constraints:

" }, "DeletionProtection":{ "shape":"BooleanOptional", - "documentation":"

The deletion protection setting for the new global database. The global database can't be deleted when deletion protection is enabled.

" + "documentation":"

Specifies whether to enable deletion protection for the new global database cluster. The global database can't be deleted when deletion protection is enabled.

" }, "DatabaseName":{ "shape":"String", - "documentation":"

The name for your database of up to 64 alphanumeric characters. If you do not provide a name, Amazon Aurora will not create a database in the global database cluster you are creating.

" + "documentation":"

The name for your database of up to 64 alphanumeric characters. If you don't specify a name, Amazon Aurora doesn't create a database in the global database cluster.

Constraints:

" }, "StorageEncrypted":{ "shape":"BooleanOptional", - "documentation":"

The storage encryption setting for the new global database cluster.

" + "documentation":"

Specifies whether to enable storage encryption for the new global database cluster.

Constraints:
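A short sketch of calling CreateGlobalCluster with the parameters documented above. All values are placeholders, and the call starts an initially empty global cluster (no SourceDBClusterIdentifier), as the operation documentation earlier in this diff describes.

import botocore.session

session = botocore.session.get_session()
rds = session.create_client('rds', region_name='us-east-1')  # placeholder region

# Create an empty Aurora global database; parameter names come from the
# request members shown above.
rds.create_global_cluster(
    GlobalClusterIdentifier='my-global-cluster',
    Engine='aurora-postgresql',  # documented valid values: aurora-mysql | aurora-postgresql
    DatabaseName='mydb',
    DeletionProtection=True,
    StorageEncrypted=True,
)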

" } } }, @@ -8956,7 +8956,7 @@ }, "Status":{ "shape":"String", - "documentation":"

The status of the Active Directory Domain membership for the DB instance or cluster. Values include joined, pending-join, failed, and so on.

" + "documentation":"

The status of the Active Directory Domain membership for the DB instance or cluster. Values include joined, pending-join, failed, and so on.

" }, "FQDN":{ "shape":"String", @@ -8964,19 +8964,19 @@ }, "IAMRoleName":{ "shape":"String", - "documentation":"

The name of the IAM role to be used when making API calls to the Directory Service.

" + "documentation":"

The name of the IAM role used when making API calls to the Directory Service.

" }, "OU":{ "shape":"String", - "documentation":"

The Active Directory organizational unit for your DB instance to join.

" + "documentation":"

The Active Directory organizational unit for the DB instance or cluster.

" }, "AuthSecretArn":{ "shape":"String", - "documentation":"

The ARN for the Secrets Manager secret that contains the credentials for the user performing the domain join.

" + "documentation":"

The ARN for the Secrets Manager secret with the credentials for the user that's a member of the domain.

" }, "DnsIps":{ "shape":"StringList", - "documentation":"

The IPv4 DNS IP addresses of your primary and secondary Active Directory domain controllers.

" + "documentation":"

The IPv4 DNS IP addresses of the primary and secondary Active Directory domain controllers.

" } }, "documentation":"

An Active Directory Domain membership record associated with the DB instance or cluster.

" @@ -10672,15 +10672,15 @@ }, "DomainFqdn":{ "shape":"String", - "documentation":"

Specifies the fully qualified domain name of an Active Directory domain.

Constraints:

Example: mymanagedADtest.mymanagedAD.mydomain

" + "documentation":"

The fully qualified domain name (FQDN) of an Active Directory domain.

Constraints:

Example: mymanagedADtest.mymanagedAD.mydomain

" }, "DomainOu":{ "shape":"String", - "documentation":"

The Active Directory organizational unit for your DB instance to join.

Constraints:

Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain

" + "documentation":"

The Active Directory organizational unit for your DB instance to join.

Constraints:

Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain

" }, "DomainAuthSecretArn":{ "shape":"String", - "documentation":"

The ARN for the Secrets Manager secret that contains the credentials for the user performing the domain join.

Example: arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456

" + "documentation":"

The ARN for the Secrets Manager secret with the credentials for the user joining the domain.

Example: arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456

" }, "DomainDnsIps":{ "shape":"StringList", @@ -10712,7 +10712,7 @@ }, "DisableDomain":{ "shape":"BooleanOptional", - "documentation":"

Boolean. If present, removes the instance from the Active Directory domain.

" + "documentation":"

Specifies whether to remove the DB instance from the Active Directory domain.

" }, "PromotionTier":{ "shape":"IntegerOptional", @@ -13087,15 +13087,15 @@ }, "DomainFqdn":{ "shape":"String", - "documentation":"

Specifies the fully qualified domain name of an Active Directory domain.

Constraints:

Example: mymanagedADtest.mymanagedAD.mydomain

" + "documentation":"

The fully qualified domain name (FQDN) of an Active Directory domain.

Constraints:

Example: mymanagedADtest.mymanagedAD.mydomain

" }, "DomainOu":{ "shape":"String", - "documentation":"

The Active Directory organizational unit for your DB instance to join.

Constraints:

Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain

" + "documentation":"

The Active Directory organizational unit for your DB instance to join.

Constraints:

Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain

" }, "DomainAuthSecretArn":{ "shape":"String", - "documentation":"

The ARN for the Secrets Manager secret that contains the credentials for the user performing the domain join.

Constraints:

Example: arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456

" + "documentation":"

The ARN for the Secrets Manager secret with the credentials for the user joining the domain.

Constraints:

Example: arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456

" }, "DomainDnsIps":{ "shape":"StringList", @@ -13107,7 +13107,7 @@ }, "DomainIAMRoleName":{ "shape":"String", - "documentation":"

Specify the name of the IAM role to be used when making API calls to the Directory Service.

This setting doesn't apply to RDS Custom.

" + "documentation":"

The name of the IAM role to use when making API calls to the Directory Service.

This setting doesn't apply to RDS Custom DB instances.

" }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", @@ -13477,19 +13477,19 @@ }, "DomainIAMRoleName":{ "shape":"String", - "documentation":"

Specify the name of the IAM role to be used when making API calls to the Directory Service.

This setting doesn't apply to RDS Custom.

" + "documentation":"

The name of the IAM role to use when making API calls to the Directory Service.

This setting doesn't apply to RDS Custom DB instances.

" }, "DomainFqdn":{ "shape":"String", - "documentation":"

Specifies the fully qualified domain name of an Active Directory domain.

Constraints:

Example: mymanagedADtest.mymanagedAD.mydomain

" + "documentation":"

The fully qualified domain name (FQDN) of an Active Directory domain.

Constraints:

Example: mymanagedADtest.mymanagedAD.mydomain

" }, "DomainOu":{ "shape":"String", - "documentation":"

The Active Directory organizational unit for your DB instance to join.

Constraints:

Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain

" + "documentation":"

The Active Directory organizational unit for your DB instance to join.

Constraints:

Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain

" }, "DomainAuthSecretArn":{ "shape":"String", - "documentation":"

The ARN for the Secrets Manager secret that contains the credentials for the user performing the domain join.

Constraints:

Example: arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456

" + "documentation":"

The ARN for the Secrets Manager secret with the credentials for the user joining the domain.

Constraints:

Example: arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456

" }, "DomainDnsIps":{ "shape":"StringList", diff --git a/botocore/handlers.py b/botocore/handlers.py index 9546c61128..55087f6749 100644 --- a/botocore/handlers.py +++ b/botocore/handlers.py @@ -73,6 +73,7 @@ from botocore.exceptions import MissingServiceIdError # noqa from botocore.utils import hyphenize_service_id # noqa from botocore.utils import is_global_accesspoint # noqa +from botocore.utils import SERVICE_NAME_ALIASES # noqa logger = logging.getLogger(__name__) @@ -99,8 +100,6 @@ S3_SIGNING_NAMES = ('s3', 's3-outposts', 's3-object-lambda') VERSION_ID_SUFFIX = re.compile(r'\?versionId=[^\s]+$') -SERVICE_NAME_ALIASES = {'runtime.sagemaker': 'sagemaker-runtime'} - def handle_service_name_alias(service_name, **kwargs): return SERVICE_NAME_ALIASES.get(service_name, service_name) diff --git a/botocore/session.py b/botocore/session.py index 9aa596bdda..b260d76cd1 100644 --- a/botocore/session.py +++ b/botocore/session.py @@ -42,6 +42,7 @@ from botocore.configprovider import ( BOTOCORE_DEFAUT_SESSION_VARIABLES, ConfigChainFactory, + ConfiguredEndpointProvider, ConfigValueStore, DefaultConfigResolver, SmartDefaultsConfigStoreFactory, @@ -957,7 +958,7 @@ def create_client( auth_token = self.get_auth_token() endpoint_resolver = self._get_internal_component('endpoint_resolver') exceptions_factory = self._get_internal_component('exceptions_factory') - config_store = self.get_component('config_store') + config_store = copy.copy(self.get_component('config_store')) user_agent_creator = self.get_component('user_agent_creator') # Session configuration values for the user agent string are applied # just before each client creation because they may have been modified @@ -972,10 +973,15 @@ def create_client( smart_defaults_factory = self._get_internal_component( 'smart_defaults_factory' ) - config_store = copy.deepcopy(config_store) smart_defaults_factory.merge_smart_defaults( config_store, defaults_mode, region_name ) + + self._add_configured_endpoint_provider( + client_name=service_name, + config_store=config_store, + ) + client_creator = botocore.client.ClientCreator( loader, endpoint_resolver, @@ -1045,6 +1051,17 @@ def _resolve_defaults_mode(self, client_config, config_store): return lmode + def _add_configured_endpoint_provider(self, client_name, config_store): + chain = ConfiguredEndpointProvider( + full_config=self.full_config, + scoped_config=self.get_scoped_config(), + client_name=client_name, + ) + config_store.set_config_provider( + logical_name='endpoint_url', + provider=chain, + ) + def _missing_cred_vars(self, access_key, secret_key): if access_key is not None and secret_key is None: return 'aws_secret_access_key' diff --git a/botocore/utils.py b/botocore/utils.py index 67755968de..266d204629 100644 --- a/botocore/utils.py +++ b/botocore/utils.py @@ -3353,3 +3353,79 @@ def _serialize_if_needed(self, value, iso=False): return value.isoformat() return value.strftime('%Y-%m-%dT%H:%M:%S%Z') return value + + +# This parameter is not part of the public interface and is subject to abrupt +# breaking changes or removal without prior announcement. +# Mapping of services that have been renamed for backwards compatibility reasons. +# Keys are the previous name that should be allowed, values are the documented +# and preferred client name. +SERVICE_NAME_ALIASES = {'runtime.sagemaker': 'sagemaker-runtime'} + + +# This parameter is not part of the public interface and is subject to abrupt +# breaking changes or removal without prior announcement. 
+# Mapping to determine the service ID for services that do not use it as the +# model data directory name. The keys are the data directory name and the +# values are the transformed service IDs (lower case and hyphenated). +CLIENT_NAME_TO_HYPHENIZED_SERVICE_ID_OVERRIDES = { + # Actual service name we use -> Allowed computed service name. + 'alexaforbusiness': 'alexa-for-business', + 'apigateway': 'api-gateway', + 'application-autoscaling': 'application-auto-scaling', + 'appmesh': 'app-mesh', + 'autoscaling': 'auto-scaling', + 'autoscaling-plans': 'auto-scaling-plans', + 'ce': 'cost-explorer', + 'cloudhsmv2': 'cloudhsm-v2', + 'cloudsearchdomain': 'cloudsearch-domain', + 'cognito-idp': 'cognito-identity-provider', + 'config': 'config-service', + 'cur': 'cost-and-usage-report-service', + 'datapipeline': 'data-pipeline', + 'directconnect': 'direct-connect', + 'devicefarm': 'device-farm', + 'discovery': 'application-discovery-service', + 'dms': 'database-migration-service', + 'ds': 'directory-service', + 'dynamodbstreams': 'dynamodb-streams', + 'elasticbeanstalk': 'elastic-beanstalk', + 'elastictranscoder': 'elastic-transcoder', + 'elb': 'elastic-load-balancing', + 'elbv2': 'elastic-load-balancing-v2', + 'es': 'elasticsearch-service', + 'events': 'eventbridge', + 'globalaccelerator': 'global-accelerator', + 'iot-data': 'iot-data-plane', + 'iot-jobs-data': 'iot-jobs-data-plane', + 'iot1click-devices': 'iot-1click-devices-service', + 'iot1click-projects': 'iot-1click-projects', + 'iotevents-data': 'iot-events-data', + 'iotevents': 'iot-events', + 'iotwireless': 'iot-wireless', + 'kinesisanalytics': 'kinesis-analytics', + 'kinesisanalyticsv2': 'kinesis-analytics-v2', + 'kinesisvideo': 'kinesis-video', + 'lex-models': 'lex-model-building-service', + 'lexv2-models': 'lex-models-v2', + 'lex-runtime': 'lex-runtime-service', + 'lexv2-runtime': 'lex-runtime-v2', + 'logs': 'cloudwatch-logs', + 'machinelearning': 'machine-learning', + 'marketplacecommerceanalytics': 'marketplace-commerce-analytics', + 'marketplace-entitlement': 'marketplace-entitlement-service', + 'meteringmarketplace': 'marketplace-metering', + 'mgh': 'migration-hub', + 'sms-voice': 'pinpoint-sms-voice', + 'resourcegroupstaggingapi': 'resource-groups-tagging-api', + 'route53': 'route-53', + 'route53domains': 'route-53-domains', + 's3control': 's3-control', + 'sdb': 'simpledb', + 'secretsmanager': 'secrets-manager', + 'serverlessrepo': 'serverlessapplicationrepository', + 'servicecatalog': 'service-catalog', + 'servicecatalog-appregistry': 'service-catalog-appregistry', + 'stepfunctions': 'sfn', + 'storagegateway': 'storage-gateway', +} diff --git a/docs/source/conf.py b/docs/source/conf.py index d5863917e5..8e896321bd 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -57,9 +57,9 @@ # built documents. # # The short X.Y version. -version = '1.30' +version = '1.31' # The full version, including alpha/beta/rc tags. -release = '1.30.1' +release = '1.31.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/tests/functional/configured_endpoint_urls/__init__.py b/tests/functional/configured_endpoint_urls/__init__.py new file mode 100644 index 0000000000..c5c740907d --- /dev/null +++ b/tests/functional/configured_endpoint_urls/__init__.py @@ -0,0 +1,12 @@ +# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). 
You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. diff --git a/tests/functional/configured_endpoint_urls/profile-tests.json b/tests/functional/configured_endpoint_urls/profile-tests.json new file mode 100644 index 0000000000..5681edc1fa --- /dev/null +++ b/tests/functional/configured_endpoint_urls/profile-tests.json @@ -0,0 +1,505 @@ +{ + "description": [ + "These are test descriptions that describe how specific data should be loaded from a profile file based on a ", + "profile name." + ], + + "testSuites": [ + { + "profiles": { + "default": { + "aws_access_key_id": "123", + "aws_secret_access_key": "456", + "region": "fake-region-10" + }, + "service_localhost_global_only": { + "aws_access_key_id": "123", + "aws_secret_access_key": "456", + "region": "fake-region-10", + "endpoint_url": "http://localhost:1234" + }, + "service_global_only": { + "aws_access_key_id": "123", + "aws_secret_access_key": "456", + "region": "fake-region-10", + "endpoint_url": "https://global.endpoint.aws" + }, + "service_specific_s3": { + "aws_access_key_id": "123", + "aws_secret_access_key": "456", + "services": "service_specific_s3", + "region": "fake-region-10" + }, + "global_and_service_specific_s3": { + "aws_access_key_id": "123", + "aws_secret_access_key": "456", + "endpoint_url": "https://global.endpoint.aws", + "services": "service_specific_s3", + "region": "fake-region-10" + }, + "ignore_global_and_service_specific_s3": { + "aws_access_key_id": "123", + "aws_secret_access_key": "456", + "endpoint_url": "https://global.endpoint.aws", + "services": "service_specific_s3", + "region": "fake-region-10", + "ignore_configured_endpoint_urls": "true" + }, + "service_specific_dynamodb_and_s3": { + "aws_access_key_id": "123", + "aws_secret_access_key": "456", + "services": "service_specific_dynamodb_and_s3", + "region": "fake-region-10" + } + }, + + "services": { + "service_specific_s3": { + "s3": { + "endpoint_url": "https://s3.endpoint.aws" + } + }, + "service_specific_dynamodb_and_s3": { + "dynamodb": { + "endpoint_url": "https://dynamodb.endpoint.aws" + }, + "s3": { + "endpoint_url": "https://s3.endpoint.aws" + } + } + }, + + "client_configs": { + "default": {}, + "endpoint_url_provided":{ + "endpoint_url": "https://client-config.endpoint.aws" + }, + "ignore_configured_endpoint_urls": { + "ignore_configured_endpoint_urls": true + }, + "provide_and_ignore_configured_endpoint_urls": { + "ignore_configured_endpoint_urls": true, + "endpoint_url": "https://client-config.endpoint.aws" + } + }, + + "environments": { + "default": {}, + "global_only": { + "AWS_ENDPOINT_URL": "https://global-from-envvar.endpoint.aws" + }, + "service_specific_s3": { + "AWS_ENDPOINT_URL_S3": "https://s3-from-envvar.endpoint.aws" + }, + "global_and_service_specific_s3": { + "AWS_ENDPOINT_URL": "https://global-from-envvar.endpoint.aws", + "AWS_ENDPOINT_URL_S3": "https://s3-from-envvar.endpoint.aws" + + }, + "ignore_global_and_service_specific_s3": { + "AWS_ENDPOINT_URL": "https://global-from-envvar.endpoint.aws", + "AWS_ENDPOINT_URL_S3": "https://s3-from-envvar.endpoint.aws", + "AWS_IGNORE_CONFIGURED_ENDPOINT_URLS": "true" + }, + "service_specific_dynamodb_and_s3": { + 
"AWS_ENDPOINT_URL_DYNAMODB": "https://dynamodb-from-envvar.endpoint.aws", + "AWS_ENDPOINT_URL_S3": "https://s3-from-envvar.endpoint.aws" + } + }, + + "endpointUrlTests": [ + { + "name": "Global endpoint url is read from services section and used for an S3 client.", + "profile": "service_global_only", + "client_config": "default", + "environment": "default", + "service": "s3", + "output": { + "endpointUrl": "https://global.endpoint.aws" + } + }, + { + "name": "Service specific endpoint url is read from services section and used for an S3 client.", + "profile": "service_specific_s3", + "client_config": "default", + "environment": "default", + "service": "s3", + "output": { + "endpointUrl": "https://s3.endpoint.aws" + } + }, + { + "name": "S3 Service-specific endpoint URL from configuration file takes precedence over global endpoint URL from configuration file.", + "profile": "global_and_service_specific_s3", + "client_config": "default", + "environment": "default", + "service": "s3", + "output": { + "endpointUrl": "https://s3.endpoint.aws" + } + }, + { + "name": "Global endpoint url environment variable takes precedence over the value resolved by the SDK.", + "profile": "default", + "client_config": "default", + "environment": "global_only", + "service": "s3", + "output": { + "endpointUrl": "https://global-from-envvar.endpoint.aws" + } + }, + { + "name": "Global endpoint url environment variable takes precendence over global endpoint configuration option.", + "profile": "service_global_only", + "client_config": "default", + "environment": "global_only", + "service": "s3", + "output": { + "endpointUrl": "https://global-from-envvar.endpoint.aws" + } + }, + { + "name": "Global endpoint url environment variable takes precendence over service-specific endpoint configuration option.", + "profile": "service_specific_s3", + "client_config": "default", + "environment": "global_only", + "service": "s3", + "output": { + "endpointUrl": "https://global-from-envvar.endpoint.aws" + } + }, + { + "name": "Global endpoint url environment variable takes precendence over global endpoint configuration option and service-specific endpoint configuration option.", + "profile": "global_and_service_specific_s3", + "client_config": "default", + "environment": "global_only", + "service": "s3", + "output": { + "endpointUrl": "https://global-from-envvar.endpoint.aws" + } + }, + { + "name": "Service-specific endpoint url environment variable takes precedence over the value resolved by the SDK.", + "profile": "default", + "client_config": "default", + "environment": "service_specific_s3", + "service": "s3", + "output": { + "endpointUrl": "https://s3-from-envvar.endpoint.aws" + } + }, + { + "name": "Service-specific endpoint url environment variable takes precedence over the global endpoint url configuration option.", + "profile": "service_global_only", + "client_config": "default", + "environment": "service_specific_s3", + "service": "s3", + "output": { + "endpointUrl": "https://s3-from-envvar.endpoint.aws" + } + }, + { + "name": "Service-specific endpoint url environment variable takes precedence over the service-specific endpoint url configuration option.", + "profile": "service_specific_s3", + "client_config": "default", + "environment": "service_specific_s3", + "service": "s3", + "output": { + "endpointUrl": "https://s3-from-envvar.endpoint.aws" + } + }, + { + "name": "Service-specific endpoint url environment variable takes precedence over the services-specific endpoint url configuration option and the global endpoint 
url configuration option.", + "profile": "global_and_service_specific_s3", + "client_config": "default", + "environment": "service_specific_s3", + "service": "s3", + "output": { + "endpointUrl": "https://s3-from-envvar.endpoint.aws" + } + }, + { + "name": "Service-specific endpoint url environment variable takes precedence over the global endpoint url environment variable.", + "profile": "default", + "client_config": "default", + "environment": "global_and_service_specific_s3", + "service": "s3", + "output": { + "endpointUrl": "https://s3-from-envvar.endpoint.aws" + } + }, + { + "name": "Service-specific endpoint url environment variable takes precedence over the global endpoint url environment variable and the global endpoint url configuration option.", + "profile": "service_global_only", + "client_config": "default", + "environment": "global_and_service_specific_s3", + "service": "s3", + "output": { + "endpointUrl": "https://s3-from-envvar.endpoint.aws" + } + }, + { + "name": "Service-specific endpoint url environment variable takes precedence over the global endpoint url environment variable and the the service-specific endpoint url configuration option.", + "profile": "service_specific_s3", + "client_config": "default", + "environment": "global_and_service_specific_s3", + "service": "s3", + "output": { + "endpointUrl": "https://s3-from-envvar.endpoint.aws" + } + }, + { + "name": "Service-specific endpoint url environment variable takes precedence over the global endpoint url environment variable, the service-specific endpoint URL configuration option, and the global endpoint URL configuration option.", + "profile": "global_and_service_specific_s3", + "client_config": "default", + "environment": "global_and_service_specific_s3", + "service": "s3", + "output": { + "endpointUrl": "https://s3-from-envvar.endpoint.aws" + } + }, + { + "name": "Client configuration takes precedence over value provided by the SDK.", + "profile": "default", + "client_config": "endpoint_url_provided", + "environment": "default", + "service": "s3", + "output": { + "endpointUrl": "https://client-config.endpoint.aws" + } + }, + { + "name": "Client configuration takes precedence over global endpoint url from services section and used for an S3 client.", + "profile": "service_global_only", + "client_config": "endpoint_url_provided", + "environment": "default", + "service": "s3", + "output": { + "endpointUrl": "https://client-config.endpoint.aws" + } + }, + { + "name": "Client configuration takes precedence over service specific endpoint url from services section and used for an S3 client.", + "profile": "service_specific_s3", + "client_config": "endpoint_url_provided", + "environment": "default", + "service": "s3", + "output": { + "endpointUrl": "https://client-config.endpoint.aws" + } + }, + { + "name": "Client configuration takes precedence over S3 Service-specific endpoint URL from configuration file and global endpoint URL from configuration file.", + "profile": "global_and_service_specific_s3", + "client_config": "endpoint_url_provided", + "environment": "default", + "service": "s3", + "output": { + "endpointUrl": "https://client-config.endpoint.aws" + } + }, + { + "name": "Client configuration takes precedence over global endpoint url environment variable.", + "profile": "default", + "client_config": "endpoint_url_provided", + "environment": "global_only", + "service": "s3", + "output": { + "endpointUrl": "https://client-config.endpoint.aws" + } + }, + { + "name": "Client configuration takes precedence over 
global endpoint url environment variable and global endpoint configuration option.", + "profile": "service_global_only", + "client_config": "endpoint_url_provided", + "environment": "global_only", + "service": "s3", + "output": { + "endpointUrl": "https://client-config.endpoint.aws" + } + }, + { + "name": "Client configuration takes precedence over global endpoint url environment variable and service-specific endpoint configuration option.", + "profile": "service_specific_s3", + "client_config": "endpoint_url_provided", + "environment": "global_only", + "service": "s3", + "output": { + "endpointUrl": "https://client-config.endpoint.aws" + } + }, + { + "name": "Client configuration takes precedence over global endpoint url environment variable, global endpoint configuration option, and service-specific endpoint configuration option.", + "profile": "global_and_service_specific_s3", + "client_config": "endpoint_url_provided", + "environment": "global_only", + "service": "s3", + "output": { + "endpointUrl": "https://client-config.endpoint.aws" + } + }, + { + "name": "Client configuration takes precedence over service-specific endpoint url environment variable.", + "profile": "default", + "client_config": "endpoint_url_provided", + "environment": "service_specific_s3", + "service": "s3", + "output": { + "endpointUrl": "https://client-config.endpoint.aws" + } + }, + { + "name": "Client configuration takes precedence over service-specific endpoint url environment variable and the global endpoint url configuration option.", + "profile": "service_global_only", + "client_config": "endpoint_url_provided", + "environment": "service_specific_s3", + "service": "s3", + "output": { + "endpointUrl": "https://client-config.endpoint.aws" + } + }, + { + "name": "Client configuration takes precedence over service-specific endpoint url environment variable and the service-specific endpoint url configuration option.", + "profile": "service_specific_s3", + "client_config": "endpoint_url_provided", + "environment": "service_specific_s3", + "service": "s3", + "output": { + "endpointUrl": "https://client-config.endpoint.aws" + } + }, + { + "name": "Client configuration takes precedence over service-specific endpoint url environment variable, the services-specific endpoint url configuration option, and the global endpoint url configuration option.", + "profile": "global_and_service_specific_s3", + "client_config": "endpoint_url_provided", + "environment": "service_specific_s3", + "service": "s3", + "output": { + "endpointUrl": "https://client-config.endpoint.aws" + } + }, + { + "name": "Client configuration takes precedence over service-specific endpoint url environment variable and the global endpoint url environment variable.", + "profile": "default", + "client_config": "endpoint_url_provided", + "environment": "global_and_service_specific_s3", + "service": "s3", + "output": { + "endpointUrl": "https://client-config.endpoint.aws" + } + }, + { + "name": "Client configuration takes precedence over service-specific endpoint url environment variable, the global endpoint url environment variable, and the global endpoint url configuration option.", + "profile": "service_global_only", + "client_config": "endpoint_url_provided", + "environment": "global_and_service_specific_s3", + "service": "s3", + "output": { + "endpointUrl": "https://client-config.endpoint.aws" + } + }, + { + "name": "Client configuration takes precedence over service-specific endpoint url environment variable, the global endpoint url environment 
variable, and the service-specific endpoint url configuration option.", + "profile": "service_specific_s3", + "client_config": "endpoint_url_provided", + "environment": "global_and_service_specific_s3", + "service": "s3", + "output": { + "endpointUrl": "https://client-config.endpoint.aws" + } + }, + { + "name": "Client configuration takes precedence over service-specific endpoint url environment variable, the global endpoint url environment variable, the service-specific endpoint URL configuration option, and the global endpoint URL configuration option.", + "profile": "global_and_service_specific_s3", + "client_config": "endpoint_url_provided", + "environment": "global_and_service_specific_s3", + "service": "s3", + "output": { + "endpointUrl": "https://client-config.endpoint.aws" + } + }, + { + "name": "All configured endpoints ignored due to environment variable.", + "profile": "global_and_service_specific_s3", + "client_config": "default", + "environment": "ignore_global_and_service_specific_s3", + "service": "s3", + "output": { + "endpointUrl": "https://s3.fake-region-10.amazonaws.com" + } + }, + { + "name": "All configured endpoints ignored due to shared config variable.", + "profile": "ignore_global_and_service_specific_s3", + "client_config": "default", + "environment": "global_and_service_specific_s3", + "service": "s3", + "output": { + "endpointUrl": "https://s3.fake-region-10.amazonaws.com" + } + }, + { + "name": "All configured endpoints ignored due to ignore client config parameter.", + "profile": "global_and_service_specific_s3", + "client_config": "ignore_configured_endpoint_urls", + "environment": "global_and_service_specific_s3", + "service": "s3", + "output": { + "endpointUrl": "https://s3.fake-region-10.amazonaws.com" + } + }, + { + "name": "Environment variable and shared config file configured endpoints ignored due to ignore shared config variable and client configured endpoint is used.", + "profile": "ignore_global_and_service_specific_s3", + "client_config": "endpoint_url_provided", + "environment": "global_and_service_specific_s3", + "service": "s3", + "output": { + "endpointUrl": "https://client-config.endpoint.aws" + } + }, + { + "name": "Environment variable and shared config file configured endpoints ignored due to ignore environment variable and client configured endpoint is used.", + "profile": "global_and_service_specific_s3", + "client_config": "endpoint_url_provided", + "environment": "ignore_global_and_service_specific_s3", + "service": "s3", + "output": { + "endpointUrl": "https://client-config.endpoint.aws" + } + }, + { + "name": "Environment variable and shared config file configured endpoints ignored due to ignore client config variable and client configured endpoint is used.", + "profile": "global_and_service_specific_s3", + "client_config": "provide_and_ignore_configured_endpoint_urls", + "environment": "global_and_service_specific_s3", + "service": "s3", + "output": { + "endpointUrl": "https://client-config.endpoint.aws" + } + }, + { + "name": "DynamoDB service-specific endpoint url shared config variable is used when service-specific S3 shared config variable is also present.", + "profile": "service_specific_dynamodb_and_s3", + "client_config": "default", + "environment": "default", + "service": "dynamodb", + "output": { + "endpointUrl": "https://dynamodb.endpoint.aws" + } + }, + { + "name": "DynamoDB service-specific endpoint url environment variable is used when service-specific S3 environment variable is also present.", + "profile": "default", + 
"client_config": "default", + "environment": "service_specific_dynamodb_and_s3", + "service": "dynamodb", + "output": { + "endpointUrl": "https://dynamodb-from-envvar.endpoint.aws" + } + } + + ] + } + ] +} diff --git a/tests/functional/configured_endpoint_urls/test_configured_endpoint_url.py b/tests/functional/configured_endpoint_urls/test_configured_endpoint_url.py new file mode 100644 index 0000000000..40cc17e26d --- /dev/null +++ b/tests/functional/configured_endpoint_urls/test_configured_endpoint_url.py @@ -0,0 +1,249 @@ +# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. + +import json +from pathlib import Path +from unittest import mock + +import pytest + +import botocore.configprovider +import botocore.utils +from botocore.compat import urlsplit +from botocore.config import Config +from tests import ClientHTTPStubber + +ENDPOINT_TESTDATA_FILE = Path(__file__).parent / "profile-tests.json" + + +def dict_to_ini_section(ini_dict, section_header): + section_str = f'[{section_header}]\n' + for key, value in ini_dict.items(): + if isinstance(value, dict): + section_str += f"{key} =\n" + for new_key, new_value in value.items(): + section_str += f" {new_key}={new_value}\n" + else: + section_str += f"{key}={value}\n" + return section_str + "\n" + + +def create_cases(): + with open(ENDPOINT_TESTDATA_FILE) as f: + test_suite = json.load(f)['testSuites'][0] + + for test_case_data in test_suite['endpointUrlTests']: + yield pytest.param( + { + 'service': test_case_data['service'], + 'profile': test_case_data['profile'], + 'expected_endpoint_url': test_case_data['output'][ + 'endpointUrl' + ], + 'client_args': get_create_client_args( + test_suite['client_configs'].get( + test_case_data['client_config'], {} + ) + ), + 'config_file_contents': get_config_file_contents( + test_case_data['profile'], test_suite + ), + 'environment': test_suite['environments'].get( + test_case_data['environment'], {} + ), + }, + id=test_case_data['name'], + ) + + +def get_create_client_args(test_case_client_config): + create_client_args = {} + + if 'endpoint_url' in test_case_client_config: + create_client_args['endpoint_url'] = test_case_client_config[ + 'endpoint_url' + ] + + if 'ignore_configured_endpoint_urls' in test_case_client_config: + create_client_args['config'] = Config( + ignore_configured_endpoint_urls=test_case_client_config[ + 'ignore_configured_endpoint_urls' + ] + ) + + return create_client_args + + +def get_config_file_contents(profile_name, test_suite): + profile = test_suite['profiles'][profile_name] + + profile_str = dict_to_ini_section( + profile, + section_header=f"profile {profile_name}", + ) + + services_section_name = profile.get('services', None) + + if services_section_name is None: + return profile_str + + services_section = test_suite['services'][services_section_name] + + service_section_str = dict_to_ini_section( + services_section, + section_header=f'services {services_section_name}', + ) + + return profile_str + service_section_str + + +@pytest.fixture +def 
client_creator(tmp_path): + tmp_config_file_path = tmp_path / 'config' + environ = {'AWS_CONFIG_FILE': str(tmp_config_file_path)} + + def _do_create_client( + service, + profile, + client_args=None, + config_file_contents=None, + environment=None, + ): + environ.update(environment) + with open(tmp_config_file_path, 'w') as f: + f.write(config_file_contents) + f.flush() + + return botocore.session.Session(profile=profile).create_client( + service, **client_args + ) + + with mock.patch('os.environ', environ): + yield _do_create_client + + +def _normalize_endpoint(url): + split_endpoint = urlsplit(url) + actual_endpoint = f"{split_endpoint.scheme}://{split_endpoint.netloc}" + return actual_endpoint + + +def assert_client_endpoint_url(client, expected_endpoint_url): + assert client.meta.endpoint_url == expected_endpoint_url + + +def assert_endpoint_url_used_for_operation( + client, expected_endpoint_url, operation, params +): + http_stubber = ClientHTTPStubber(client) + http_stubber.start() + http_stubber.add_response() + + # Call an operation on the client + getattr(client, operation)(**params) + + assert ( + _normalize_endpoint(http_stubber.requests[0].url) + == expected_endpoint_url + ) + + +def _known_service_names_and_ids(): + my_session = botocore.session.get_session() + loader = my_session.get_component('data_loader') + available_services = loader.list_available_services('service-2') + + result = [] + for service_name in available_services: + model = my_session.get_service_model(service_name) + result.append((model.service_name, model.service_id)) + return sorted(result) + + +SERVICE_TO_OPERATION = {'s3': 'list_buckets', 'dynamodb': 'list_tables'} + + +@pytest.mark.parametrize("test_case", create_cases()) +def test_resolve_configured_endpoint_url(test_case, client_creator): + client = client_creator( + service=test_case['service'], + profile=test_case['profile'], + client_args=test_case['client_args'], + config_file_contents=test_case['config_file_contents'], + environment=test_case['environment'], + ) + + assert_endpoint_url_used_for_operation( + client=client, + expected_endpoint_url=test_case['expected_endpoint_url'], + operation=SERVICE_TO_OPERATION[test_case['service']], + params={}, + ) + + +@pytest.mark.parametrize( + 'service_name,service_id', _known_service_names_and_ids() +) +def test_expected_service_env_var_name_is_respected( + service_name, service_id, client_creator +): + transformed_service_id = service_id.replace(' ', '_').upper() + + client = client_creator( + service=service_name, + profile='default', + client_args={}, + config_file_contents=( + '[profile default]\n' + 'aws_access_key_id=123\n' + 'aws_secret_access_key=456\n' + 'region=fake-region-10\n' + ), + environment={ + f'AWS_ENDPOINT_URL_{transformed_service_id}': 'https://endpoint-override' + }, + ) + + assert_client_endpoint_url( + client=client, expected_endpoint_url='https://endpoint-override' + ) + + +@pytest.mark.parametrize( + 'service_name,service_id', _known_service_names_and_ids() +) +def test_expected_service_config_section_name_is_respected( + service_name, service_id, client_creator +): + transformed_service_id = service_id.replace(' ', '_').lower() + + client = client_creator( + service=service_name, + profile='default', + client_args={}, + config_file_contents=( + f'[profile default]\n' + f'services=my-services\n' + f'aws_access_key_id=123\n' + f'aws_secret_access_key=456\n' + f'region=fake-region-10\n\n' + f'[services my-services]\n' + f'{transformed_service_id} = \n' + f' endpoint_url = 
https://endpoint-override\n\n' + ), + environment={}, + ) + + assert_client_endpoint_url( + client=client, expected_endpoint_url='https://endpoint-override' + ) diff --git a/tests/functional/endpoint-rules/outposts/endpoint-tests-1.json b/tests/functional/endpoint-rules/outposts/endpoint-tests-1.json index ad7c0be1cc..5dd2c4cb2f 100644 --- a/tests/functional/endpoint-rules/outposts/endpoint-tests-1.json +++ b/tests/functional/endpoint-rules/outposts/endpoint-tests-1.json @@ -8,9 +8,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "af-south-1", "UseFIPS": false, - "Region": "af-south-1" + "UseDualStack": false } }, { @@ -21,9 +21,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-east-1", "UseFIPS": false, - "Region": "ap-east-1" + "UseDualStack": false } }, { @@ -34,9 +34,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-northeast-1", "UseFIPS": false, - "Region": "ap-northeast-1" + "UseDualStack": false } }, { @@ -47,9 +47,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-northeast-2", "UseFIPS": false, - "Region": "ap-northeast-2" + "UseDualStack": false } }, { @@ -60,9 +60,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-northeast-3", "UseFIPS": false, - "Region": "ap-northeast-3" + "UseDualStack": false } }, { @@ -73,9 +73,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-south-1", "UseFIPS": false, - "Region": "ap-south-1" + "UseDualStack": false } }, { @@ -86,9 +86,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-southeast-1", "UseFIPS": false, - "Region": "ap-southeast-1" + "UseDualStack": false } }, { @@ -99,9 +99,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-southeast-2", "UseFIPS": false, - "Region": "ap-southeast-2" + "UseDualStack": false } }, { @@ -112,9 +112,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ca-central-1", "UseFIPS": false, - "Region": "ca-central-1" + "UseDualStack": false } }, { @@ -125,9 +125,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ca-central-1", "UseFIPS": true, - "Region": "ca-central-1" + "UseDualStack": false } }, { @@ -138,9 +138,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-central-1", "UseFIPS": false, - "Region": "eu-central-1" + "UseDualStack": false } }, { @@ -151,9 +151,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-north-1", "UseFIPS": false, - "Region": "eu-north-1" + "UseDualStack": false } }, { @@ -164,9 +164,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-south-1", "UseFIPS": false, - "Region": "eu-south-1" + "UseDualStack": false } }, { @@ -177,9 +177,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-west-1", "UseFIPS": false, - "Region": "eu-west-1" + "UseDualStack": false } }, { @@ -190,9 +190,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-west-2", "UseFIPS": false, - "Region": "eu-west-2" + "UseDualStack": false } }, { @@ -203,9 +203,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-west-3", "UseFIPS": false, - "Region": "eu-west-3" + "UseDualStack": false } }, { @@ -216,9 +216,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "me-south-1", "UseFIPS": false, - "Region": "me-south-1" + "UseDualStack": false } }, { @@ -229,9 +229,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "sa-east-1", "UseFIPS": false, - "Region": "sa-east-1" + "UseDualStack": false } }, { @@ -242,9 +242,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + 
"UseDualStack": false } }, { @@ -255,9 +255,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -268,9 +268,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-2", "UseFIPS": false, - "Region": "us-east-2" + "UseDualStack": false } }, { @@ -281,9 +281,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-2", "UseFIPS": true, - "Region": "us-east-2" + "UseDualStack": false } }, { @@ -294,9 +294,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-1", "UseFIPS": false, - "Region": "us-west-1" + "UseDualStack": false } }, { @@ -307,9 +307,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-1", "UseFIPS": true, - "Region": "us-west-1" + "UseDualStack": false } }, { @@ -320,9 +320,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-2", "UseFIPS": false, - "Region": "us-west-2" + "UseDualStack": false } }, { @@ -333,9 +333,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-2", "UseFIPS": true, - "Region": "us-west-2" + "UseDualStack": false } }, { @@ -346,9 +346,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -359,9 +359,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -372,9 +372,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -385,9 +385,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -398,9 +398,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -411,9 +411,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -424,9 +424,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -437,9 +437,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -450,9 +450,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-west-1", "UseFIPS": false, - "Region": "us-gov-west-1" + "UseDualStack": false } }, { @@ -463,9 +463,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-west-1", "UseFIPS": true, - "Region": "us-gov-west-1" + "UseDualStack": false } }, { @@ -476,9 +476,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -489,9 +489,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -502,9 +502,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-iso-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + 
"UseDualStack": true } }, { @@ -515,9 +526,31 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-iso-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -528,9 +561,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-isob-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -541,22 +585,35 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { + "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { "UseFIPS": false, - "Region": "us-east-1", + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -566,9 +623,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -578,11 +635,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/tests/functional/models/sdk-default-configuration.json b/tests/functional/models/sdk-default-configuration.json new file mode 100644 index 0000000000..87278367d8 --- /dev/null +++ b/tests/functional/models/sdk-default-configuration.json @@ -0,0 +1,55 @@ +{ + "version": 1, + "base": { + "retryMode": "standard", + "stsRegionalEndpoints": "regional", + "s3UsEast1RegionalEndpoints": "regional", + "connectTimeoutInMillis": 9999000, + "tlsNegotiationTimeoutInMillis": 9999000 + }, + "modes": { + "standard": { + "connectTimeoutInMillis": { + "override": 9999000 + }, + "tlsNegotiationTimeoutInMillis": { + "override": 9999000 + } + }, + "in-region": { + }, + "cross-region": { + "connectTimeoutInMillis": { + "override": 9999000 + }, + "tlsNegotiationTimeoutInMillis": { + "override": 9999000 + } + }, 
+ "mobile": { + "connectTimeoutInMillis": { + "override": 99999000 + }, + "tlsNegotiationTimeoutInMillis": { + "override": 99999000 + } + } + }, + "documentation": { + "modes": { + "standard": "

FOR TESTING ONLY: The STANDARD mode provides the latest recommended default values that should be safe to run in most scenarios. Note that the default values vended from this mode might change as best practices may evolve. As a result, it is encouraged to perform tests when upgrading the SDK",
+ "in-region": "FOR TESTING ONLY: The IN_REGION mode builds on the standard mode and includes optimization tailored for applications which call AWS services from within the same AWS region. Note that the default values vended from this mode might change as best practices may evolve. As a result, it is encouraged to perform tests when upgrading the SDK",
+ "cross-region": "FOR TESTING ONLY: The CROSS_REGION mode builds on the standard mode and includes optimization tailored for applications which call AWS services in a different region. Note that the default values vended from this mode might change as best practices may evolve. As a result, it is encouraged to perform tests when upgrading the SDK",
+ "mobile": "FOR TESTING ONLY: The MOBILE mode builds on the standard mode and includes optimization tailored for mobile applications. Note that the default values vended from this mode might change as best practices may evolve. As a result, it is encouraged to perform tests when upgrading the SDK",
+ "auto": "FOR TESTING ONLY: The AUTO mode is an experimental mode that builds on the standard mode. The SDK will attempt to discover the execution environment to determine the appropriate settings automatically. Note that the auto detection is heuristics-based and does not guarantee 100% accuracy. STANDARD mode will be used if the execution environment cannot be determined. The auto detection might query the EC2 Instance Metadata service, which might introduce latency. Therefore we recommend choosing an explicit defaults_mode instead if startup latency is critical to your application",
+ "legacy": "FOR TESTING ONLY: The LEGACY mode provides default settings that vary per SDK and were used prior to establishment of defaults_mode"
+ },
+ "configuration": {
+ "retryMode": "FOR TESTING ONLY: A retry mode specifies how the SDK attempts retries. See Retry Mode",
+ "stsRegionalEndpoints": "FOR TESTING ONLY: Specifies how the SDK determines the AWS service endpoint that it uses to talk to the AWS Security Token Service (AWS STS). See Setting STS Regional endpoints",
+ "s3UsEast1RegionalEndpoints": "FOR TESTING ONLY: Specifies how the SDK determines the AWS service endpoint that it uses to talk to Amazon S3 for the us-east-1 region",
+ "connectTimeoutInMillis": "FOR TESTING ONLY: The amount of time after making an initial connection attempt on a socket, where if the client does not receive a completion of the connect handshake, the client gives up and fails the operation",
+ "tlsNegotiationTimeoutInMillis": "FOR TESTING ONLY: The maximum amount of time that a TLS handshake is allowed to take from the time the CLIENT HELLO message is sent to the time the client and server have fully negotiated ciphers and exchanged keys

" + } + } +} diff --git a/tests/functional/test_config_provider.py b/tests/functional/test_config_provider.py index 0206286919..57b3fd4457 100644 --- a/tests/functional/test_config_provider.py +++ b/tests/functional/test_config_provider.py @@ -10,8 +10,11 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. +from pathlib import Path + import pytest +import botocore.exceptions from botocore.config import Config from botocore.session import get_session @@ -28,6 +31,13 @@ sdk_default_configuration = loader.load_data('sdk-default-configuration') +def assert_client_uses_standard_defaults(client): + assert client.meta.config.s3['us_east_1_regional_endpoint'] == 'regional' + assert client.meta.config.connect_timeout == 3.1 + assert client.meta.endpoint_url == 'https://sts.us-west-2.amazonaws.com' + assert client.meta.config.retries['mode'] == 'standard' + + @pytest.mark.parametrize("mode", sdk_default_configuration['base']) def test_no_new_sdk_default_configuration_values(mode): err_msg = ( @@ -45,7 +55,69 @@ def test_default_configurations_resolve_correctly(): client = session.create_client( 'sts', config=config, region_name='us-west-2' ) + assert_client_uses_standard_defaults(client) + + +@pytest.fixture +def loader(): + test_models_dir = Path(__file__).parent / 'models' + loader = botocore.loaders.Loader() + loader.search_paths.insert(0, test_models_dir) + return loader + + +@pytest.fixture +def session(loader): + session = botocore.session.Session() + session.register_component('data_loader', loader) + return session + + +def assert_client_uses_legacy_defaults(client): + assert client.meta.config.s3 is None + assert client.meta.config.connect_timeout == 60 + assert client.meta.endpoint_url == 'https://sts.amazonaws.com' + assert client.meta.config.retries['mode'] == 'legacy' + + +def assert_client_uses_testing_defaults(client): assert client.meta.config.s3['us_east_1_regional_endpoint'] == 'regional' - assert client.meta.config.connect_timeout == 3.1 - assert client.meta.endpoint_url == 'https://sts.us-west-2.amazonaws.com' + assert client.meta.config.connect_timeout == 9999 + assert client.meta.endpoint_url == 'https://sts.amazonaws.com' assert client.meta.config.retries['mode'] == 'standard' + + +class TestConfigurationDefaults: + def test_defaults_mode_resolved_from_config_store(self, session): + config_store = session.get_component('config_store') + config_store.set_config_variable('defaults_mode', 'standard') + client = session.create_client('sts', 'us-west-2') + assert_client_uses_testing_defaults(client) + + def test_no_mutate_session_provider(self, session): + # Using the standard default mode should change the connect timeout + # on the client, but not the session + standard_client = session.create_client( + 'sts', 'us-west-2', config=Config(defaults_mode='standard') + ) + assert_client_uses_testing_defaults(standard_client) + + # Using the legacy default mode should not change the connect timeout + # on the client or the session. By default the connect timeout for a client + # is 60 seconds, and unset on the session. 
+ legacy_client = session.create_client('sts', 'us-west-2') + assert_client_uses_legacy_defaults(legacy_client) + + def test_defaults_mode_resolved_from_client_config(self, session): + config = Config(defaults_mode='standard') + client = session.create_client('sts', 'us-west-2', config=config) + assert_client_uses_testing_defaults(client) + + def test_defaults_mode_resolved_invalid_mode_exception(self, session): + with pytest.raises(botocore.exceptions.InvalidDefaultsMode): + config = Config(defaults_mode='invalid_default_mode') + session.create_client('sts', 'us-west-2', config=config) + + def test_defaults_mode_resolved_legacy(self, session): + client = session.create_client('sts', 'us-west-2') + assert_client_uses_legacy_defaults(client) diff --git a/tests/functional/test_endpoints.py b/tests/functional/test_endpoints.py index 59ce8f0438..1372f8ff89 100644 --- a/tests/functional/test_endpoints.py +++ b/tests/functional/test_endpoints.py @@ -13,69 +13,7 @@ import pytest from botocore.session import get_session - -SERVICE_RENAMES = { - # Actual service name we use -> Allowed computed service name. - 'alexaforbusiness': 'alexa-for-business', - 'apigateway': 'api-gateway', - 'application-autoscaling': 'application-auto-scaling', - 'appmesh': 'app-mesh', - 'autoscaling': 'auto-scaling', - 'autoscaling-plans': 'auto-scaling-plans', - 'ce': 'cost-explorer', - 'cloudhsmv2': 'cloudhsm-v2', - 'cloudsearchdomain': 'cloudsearch-domain', - 'cognito-idp': 'cognito-identity-provider', - 'config': 'config-service', - 'cur': 'cost-and-usage-report-service', - 'datapipeline': 'data-pipeline', - 'directconnect': 'direct-connect', - 'devicefarm': 'device-farm', - 'discovery': 'application-discovery-service', - 'dms': 'database-migration-service', - 'ds': 'directory-service', - 'dynamodbstreams': 'dynamodb-streams', - 'elasticbeanstalk': 'elastic-beanstalk', - 'elastictranscoder': 'elastic-transcoder', - 'elb': 'elastic-load-balancing', - 'elbv2': 'elastic-load-balancing-v2', - 'es': 'elasticsearch-service', - 'events': 'eventbridge', - 'globalaccelerator': 'global-accelerator', - 'iot-data': 'iot-data-plane', - 'iot-jobs-data': 'iot-jobs-data-plane', - 'iot1click-devices': 'iot-1click-devices-service', - 'iot1click-projects': 'iot-1click-projects', - 'iotevents-data': 'iot-events-data', - 'iotevents': 'iot-events', - 'iotwireless': 'iot-wireless', - 'kinesisanalytics': 'kinesis-analytics', - 'kinesisanalyticsv2': 'kinesis-analytics-v2', - 'kinesisvideo': 'kinesis-video', - 'lex-models': 'lex-model-building-service', - 'lexv2-models': 'lex-models-v2', - 'lex-runtime': 'lex-runtime-service', - 'lexv2-runtime': 'lex-runtime-v2', - 'logs': 'cloudwatch-logs', - 'machinelearning': 'machine-learning', - 'marketplacecommerceanalytics': 'marketplace-commerce-analytics', - 'marketplace-entitlement': 'marketplace-entitlement-service', - 'meteringmarketplace': 'marketplace-metering', - 'mgh': 'migration-hub', - 'sms-voice': 'pinpoint-sms-voice', - 'resourcegroupstaggingapi': 'resource-groups-tagging-api', - 'route53': 'route-53', - 'route53domains': 'route-53-domains', - 's3control': 's3-control', - 'sdb': 'simpledb', - 'secretsmanager': 'secrets-manager', - 'serverlessrepo': 'serverlessapplicationrepository', - 'servicecatalog': 'service-catalog', - 'servicecatalog-appregistry': 'service-catalog-appregistry', - 'stepfunctions': 'sfn', - 'storagegateway': 'storage-gateway', -} - +from botocore.utils import CLIENT_NAME_TO_HYPHENIZED_SERVICE_ID_OVERRIDES ENDPOINT_PREFIX_OVERRIDE = { # entry in endpoints.json -> actual 
endpoint prefix. @@ -163,7 +101,7 @@ def test_endpoint_matches_service(endpoint_prefix): @pytest.mark.parametrize("service_name", AVAILABLE_SERVICES) -def test_service_name_matches_endpoint_prefix(service_name): +def test_client_name_matches_hyphenized_service_id(service_name): """Generates tests for each service to verify that the computed service named based on the service id matches the service name used to create a client (i.e the directory name in botocore/data) @@ -174,7 +112,9 @@ def test_service_name_matches_endpoint_prefix(service_name): # Handle known exceptions where we have renamed the service directory # for one reason or another. - actual_service_name = SERVICE_RENAMES.get(service_name, service_name) + actual_service_name = CLIENT_NAME_TO_HYPHENIZED_SERVICE_ID_OVERRIDES.get( + service_name, service_name + ) err_msg = ( f"Actual service name `{actual_service_name}` does not match " diff --git a/tests/unit/cfg/aws_services_config b/tests/unit/cfg/aws_services_config new file mode 100644 index 0000000000..cf9040c2a4 --- /dev/null +++ b/tests/unit/cfg/aws_services_config @@ -0,0 +1,9 @@ +[default] +endpoint_url = https://localhost:1234/ +services = my-services + +[services my-services] +s3 = + endpoint_url = https://localhost:5678/ +dynamodb = + endpoint_url = https://localhost:8888/ diff --git a/tests/unit/test_config_provider.py b/tests/unit/test_config_provider.py index 64760d9d24..4a7d91b0d6 100644 --- a/tests/unit/test_config_provider.py +++ b/tests/unit/test_config_provider.py @@ -21,6 +21,7 @@ BaseProvider, ChainProvider, ConfigChainFactory, + ConfiguredEndpointProvider, ConfigValueStore, ConstantProvider, DefaultConfigResolver, @@ -363,6 +364,60 @@ def test_can_get_config_provider_non_chain_provider(self): self.assertIsInstance(provider, ConstantProvider) self.assertEqual(value, 'bar') + def test_deepcopy_preserves_overrides(self): + provider = ConstantProvider(100) + config_store = ConfigValueStore(mapping={'fake_variable': provider}) + config_store.set_config_variable('fake_variable', 'override-value') + + config_store_deepcopy = copy.deepcopy(config_store) + + value = config_store_deepcopy.get_config_variable('fake_variable') + self.assertEqual(value, 'override-value') + + def test_copy_preserves_provider_identities(self): + fake_variable_provider = ConstantProvider(100) + config_store = ConfigValueStore( + mapping={ + 'fake_variable': fake_variable_provider, + } + ) + + config_store_copy = copy.copy(config_store) + + self.assertIs( + config_store.get_config_provider('fake_variable'), + config_store_copy.get_config_provider('fake_variable'), + ) + + def test_copy_preserves_overrides(self): + provider = ConstantProvider(100) + config_store = ConfigValueStore(mapping={'fake_variable': provider}) + config_store.set_config_variable('fake_variable', 'override-value') + + config_store_copy = copy.copy(config_store) + + value = config_store_copy.get_config_variable('fake_variable') + self.assertEqual(value, 'override-value') + + def test_copy_update_does_not_mutate_source_config_store(self): + fake_variable_provider = ConstantProvider(100) + config_store = ConfigValueStore( + mapping={ + 'fake_variable': fake_variable_provider, + } + ) + + config_store_copy = copy.copy(config_store) + + another_variable_provider = ConstantProvider('ABC') + + config_store_copy.set_config_provider( + 'fake_variable', another_variable_provider + ) + + assert config_store.get_config_variable('fake_variable') == 100 + assert config_store_copy.get_config_variable('fake_variable') == 'ABC' + class 
TestInstanceVarProvider(unittest.TestCase): def assert_provides_value(self, name, instance_map, expected_value): @@ -633,8 +688,8 @@ def fake_session(self): return fake_session def _create_config_value_store(self, s3_mapping={}, **override_kwargs): - provider_foo = ConstantProvider(value='foo') - environment_provider_foo = EnvironmentProvider( + constant_provider = ConstantProvider(value='my_sts_regional_endpoint') + environment_provider = EnvironmentProvider( name='AWS_RETRY_MODE', env={'AWS_RETRY_MODE': None} ) fake_session = mock.Mock(spec=session.Session) @@ -642,8 +697,10 @@ def _create_config_value_store(self, s3_mapping={}, **override_kwargs): # Testing with three different providers to validate # SmartDefaultsConfigStoreFactory._get_new_chain_provider mapping = { - 'sts_regional_endpoints': ChainProvider(providers=[provider_foo]), - 'retry_mode': ChainProvider(providers=[environment_provider_foo]), + 'sts_regional_endpoints': ChainProvider( + providers=[constant_provider] + ), + 'retry_mode': ChainProvider(providers=[environment_provider]), 's3': SectionConfigProvider('s3', fake_session, s3_mapping), } mapping.update(**override_kwargs) @@ -657,11 +714,68 @@ def _create_os_environ_patcher(self): def test_config_store_deepcopy(self): config_store = ConfigValueStore() - config_store.set_config_provider('foo', ConstantProvider('bar')) + config_store.set_config_provider( + 'constant_value', ConstantProvider('ABC') + ) config_store_copy = copy.deepcopy(config_store) - config_store_copy.set_config_provider('fizz', ConstantProvider('buzz')) - assert config_store.get_config_variable('fizz') is None - assert config_store_copy.get_config_variable('foo') == 'bar' + config_store_copy.set_config_provider( + 'constant_value_copy', ConstantProvider('123') + ) + assert config_store.get_config_variable('constant_value_copy') is None + assert config_store_copy.get_config_variable('constant_value') == 'ABC' + + def _create_config_value_store_to_test_merge(self): + environment_provider = EnvironmentProvider( + name='AWS_S3_US_EAST_1_REGIONAL_ENDPOINT', + env={}, + ) + + s3_mapping = { + 'us_east_1_regional_endpoint': ChainProvider( + providers=[environment_provider] + ) + } + + override_kwargs = {'connect_timeout': ConstantProvider(value=None)} + + config_value_store = self._create_config_value_store( + s3_mapping=s3_mapping, **override_kwargs + ) + + return config_value_store + + @pytest.mark.parametrize( + 'config_variable,expected_value_before,expected_value_after', + [ + ['retry_mode', None, 'standard'], + ['sts_regional_endpoints', 'my_sts_regional_endpoint', 'regional'], + ['connect_timeout', None, 2], + ['s3', None, {'us_east_1_regional_endpoint': 'regional'}], + ], + ) + def test_config_store_providers_not_mutated_after_merge( + self, + config_variable, + expected_value_before, + expected_value_after, + smart_defaults_factory, + ): + """Test uses the standard default mode from the template""" + + config_value_store = self._create_config_value_store_to_test_merge() + + provider = config_value_store.get_config_provider(config_variable) + + smart_defaults_factory.merge_smart_defaults( + config_value_store, 'standard', 'some-region' + ) + + assert provider.provide() == expected_value_before + + assert ( + config_value_store.get_config_variable(config_variable) + == expected_value_after + ) @pytest.mark.parametrize( 'defaults_mode, retry_mode, sts_regional_endpoints,' @@ -710,7 +824,7 @@ def test_resolve_default_values_on_config( assert config_store.get_config_variable('connect_timeout') == 2 def 
test_no_resolve_default_s3_values_on_config( - self, smart_defaults_factory, fake_session + self, smart_defaults_factory ): environment_provider = EnvironmentProvider( name='AWS_S3_US_EAST_1_REGIONAL_ENDPOINT', @@ -829,3 +943,206 @@ def test_resolve_auto_mode_imds_region_provider_connect_timeout(self): ) mode = smart_defaults_factory.resolve_auto_mode('us-west-2') assert mode == 'standard' + + +def create_cases(): + service = 'batch' + + return [ + dict( + service=service, + environ_map={}, + full_config_map={}, + expected_value=None, + ), + dict( + service=service, + environ_map={'AWS_ENDPOINT_URL': 'global-from-env'}, + full_config_map={}, + expected_value='global-from-env', + ), + dict( + service=service, + environ_map={ + f'AWS_ENDPOINT_URL_{service.upper()}': 'service-from-env', + 'AWS_ENDPOINT_URL': 'global-from-env', + }, + full_config_map={}, + expected_value='service-from-env', + ), + dict( + service=service, + environ_map={ + 'AWS_ENDPOINT_URL': 'global-from-env', + 'AWS_ENDPOINT_URL_S3': 's3-endpoint-url', + }, + full_config_map={}, + expected_value='global-from-env', + ), + dict( + service=service, + environ_map={}, + full_config_map={ + 'profiles': {'default': {'endpoint_url': 'global-from-config'}} + }, + expected_value='global-from-config', + ), + dict( + service=service, + environ_map={}, + full_config_map={ + 'profiles': { + 'default': { + 'services': 'my-services', + } + }, + 'services': { + 'my-services': { + service: {'endpoint_url': "service-from-config"} + } + }, + }, + expected_value='service-from-config', + ), + dict( + service=service, + environ_map={}, + full_config_map={ + 'profiles': { + 'default': { + 'services': 'my-services', + 'endpoint_url': 'global-from-config', + } + }, + 'services': { + 'my-services': { + service: {'endpoint_url': "service-from-config"} + } + }, + }, + expected_value='service-from-config', + ), + dict( + service=service, + environ_map={ + 'AWS_ENDPOINT_URL': 'global-from-env', + }, + full_config_map={ + 'profiles': { + 'default': { + 'endpoint_url': 'global-from-config', + } + }, + }, + expected_value='global-from-env', + ), + dict( + service=service, + environ_map={ + f'AWS_ENDPOINT_URL_{service.upper()}': 'service-from-env', + }, + full_config_map={ + 'profiles': { + 'default': { + 'endpoint_url': 'global-from-config', + } + }, + }, + expected_value='service-from-env', + ), + dict( + service='s3', + environ_map={}, + full_config_map={ + 'profiles': { + 'default': { + 'services': 'my-services', + 'endpoint_url': 'global-from-config', + } + }, + 'services': { + 'my-services': { + service: {'endpoint_url': "service-from-config"} + } + }, + }, + expected_value='global-from-config', + ), + dict( + service='runtime.sagemaker', + environ_map={}, + full_config_map={ + 'profiles': { + 'default': { + 'services': 'my-services', + } + }, + 'services': { + 'my-services': { + 'sagemaker_runtime': { + 'endpoint_url': "service-from-config" + } + } + }, + }, + expected_value='service-from-config', + ), + dict( + service='apigateway', + environ_map={}, + full_config_map={ + 'profiles': { + 'default': { + 'services': 'my-services', + } + }, + 'services': { + 'my-services': { + 'api_gateway': {'endpoint_url': "service-from-config"} + } + }, + }, + expected_value='service-from-config', + ), + ] + + +class TestConfiguredEndpointProvider: + def assert_does_provide( + self, + service, + environ_map, + full_config_map, + expected_value, + ): + scoped_config_map = full_config_map.get('profiles', {}).get( + 'default', {} + ) + + chain = 
ConfiguredEndpointProvider( + scoped_config=scoped_config_map, + full_config=full_config_map, + client_name=service, + environ=environ_map, + ) + value = chain.provide() + assert value == expected_value + + @pytest.mark.parametrize('test_case', create_cases()) + def test_does_provide(self, test_case): + self.assert_does_provide(**test_case) + + def test_is_deepcopyable(self): + env = {'AWS_ENDPOINT_URL_BATCH': 'https://endpoint-override'} + provider = ConfiguredEndpointProvider( + full_config={}, scoped_config={}, client_name='batch', environ=env + ) + + provider_deepcopy = copy.deepcopy(provider) + assert provider is not provider_deepcopy + assert provider.provide() == 'https://endpoint-override' + assert provider_deepcopy.provide() == 'https://endpoint-override' + + env['AWS_ENDPOINT_URL_BATCH'] = 'https://another-new-endpoint-override' + assert provider.provide() == 'https://another-new-endpoint-override' + assert provider_deepcopy.provide() == 'https://endpoint-override' diff --git a/tests/unit/test_configloader.py b/tests/unit/test_configloader.py index 4a331c9e2d..a6e02f7d4f 100644 --- a/tests/unit/test_configloader.py +++ b/tests/unit/test_configloader.py @@ -188,6 +188,24 @@ def test_sso_session_config(self): self.assertEqual(sso_config['sso_region'], 'us-east-1') self.assertEqual(sso_config['sso_start_url'], 'https://example.com') + def test_services_config(self): + filename = path('aws_services_config') + loaded_config = load_config(filename) + self.assertIn('profiles', loaded_config) + self.assertIn('default', loaded_config['profiles']) + self.assertIn('services', loaded_config) + self.assertIn('my-services', loaded_config['services']) + services_config = loaded_config['services']['my-services'] + self.assertIn('s3', services_config) + self.assertIn('dynamodb', services_config) + self.assertEqual( + services_config['s3']['endpoint_url'], 'https://localhost:5678/' + ) + self.assertEqual( + services_config['dynamodb']['endpoint_url'], + 'https://localhost:8888/', + ) + if __name__ == "__main__": unittest.main() diff --git a/tests/unit/test_session.py b/tests/unit/test_session.py index ee4cad5a58..ef2495ebcf 100644 --- a/tests/unit/test_session.py +++ b/tests/unit/test_session.py @@ -772,34 +772,6 @@ def test_param_api_version_overrides_config_value(self, client_creator): ] self.assertEqual(call_kwargs['api_version'], override_api_version) - @mock.patch('botocore.client.ClientCreator') - def test_defaults_mode_resolved_from_config_store(self, client_creator): - config_store = self.session.get_component('config_store') - config_store.set_config_variable('defaults_mode', 'standard') - self.session.create_client('sts', 'us-west-2') - self.assertIsNot(client_creator.call_args[0][-1], config_store) - - @mock.patch('botocore.client.ClientCreator') - def test_defaults_mode_resolved_from_client_config(self, client_creator): - config_store = self.session.get_component('config_store') - config = botocore.config.Config(defaults_mode='standard') - self.session.create_client('sts', 'us-west-2', config=config) - self.assertIsNot(client_creator.call_args[0][-1], config_store) - - @mock.patch('botocore.client.ClientCreator') - def test_defaults_mode_resolved_invalid_mode_exception( - self, client_creator - ): - with self.assertRaises(botocore.exceptions.InvalidDefaultsMode): - config = botocore.config.Config(defaults_mode='foo') - self.session.create_client('sts', 'us-west-2', config=config) - - @mock.patch('botocore.client.ClientCreator') - def test_defaults_mode_resolved_legacy(self, 
client_creator): - config_store = self.session.get_component('config_store') - self.session.create_client('sts', 'us-west-2') - self.assertIs(client_creator.call_args[0][-1], config_store) - class TestSessionComponent(BaseSessionTest): def test_internal_component(self):
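
Note (not part of the diff): the sketch below illustrates how the configured-endpoint-URL behavior exercised by the tests above would look from a user's point of view. It is a minimal example, assuming botocore 1.31.0 or later; the temporary config file, the localhost endpoint values, and the choice of dynamodb/sqs as sample services are invented for illustration, and only behavior shown in this changeset (AWS_ENDPOINT_URL, AWS_ENDPOINT_URL_<SERVICE_ID>, the [services ...] config section, and Config(ignore_configured_endpoint_urls)) is relied on.

    # illustrative_configured_endpoints.py -- a sketch, not part of this changeset.
    import os
    import tempfile

    import botocore.session
    from botocore.config import Config

    # A shared config file with a profile-level endpoint_url plus a per-service
    # override, mirroring the tests/unit/cfg/aws_services_config fixture above.
    config_contents = (
        '[default]\n'
        'region = us-west-2\n'
        'endpoint_url = https://localhost:1234/\n'
        'services = my-services\n'
        '\n'
        '[services my-services]\n'
        'dynamodb =\n'
        '  endpoint_url = https://localhost:8888/\n'
    )

    with tempfile.NamedTemporaryFile('w', suffix='-aws-config', delete=False) as f:
        f.write(config_contents)
        config_path = f.name

    os.environ['AWS_CONFIG_FILE'] = config_path
    os.environ['AWS_ACCESS_KEY_ID'] = 'fake-access-key'
    os.environ['AWS_SECRET_ACCESS_KEY'] = 'fake-secret-key'

    session = botocore.session.Session()

    # The service-specific entry in [services my-services] wins over the
    # profile-level endpoint_url.
    dynamodb = session.create_client('dynamodb')
    print(dynamodb.meta.endpoint_url)  # the dynamodb override (localhost:8888)

    # A service without its own entry falls back to the profile-level endpoint_url.
    sqs = session.create_client('sqs')
    print(sqs.meta.endpoint_url)  # the profile-level value (localhost:1234)

    # Environment variables take precedence over the shared config file; the
    # service-specific form is derived from the service id, e.g. DynamoDB ->
    # AWS_ENDPOINT_URL_DYNAMODB. A fresh Session is used so the new variable
    # is picked up.
    os.environ['AWS_ENDPOINT_URL_DYNAMODB'] = 'https://localhost:5555/'
    dynamodb = botocore.session.Session().create_client('dynamodb')
    print(dynamodb.meta.endpoint_url)  # the env var override (localhost:5555)

    # ignore_configured_endpoint_urls opts a single client out of all of the
    # above and falls back to the normal regional AWS endpoint.
    ignoring = botocore.session.Session().create_client(
        'dynamodb', config=Config(ignore_configured_endpoint_urls=True)
    )
    print(ignoring.meta.endpoint_url)  # the regular AWS regional endpoint

An endpoint URL passed directly to create_client(..., endpoint_url=...) still takes precedence over everything shown here, which is the resolution order the parametrized cases in create_cases() above are asserting.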