diff --git a/.changes/1.31.0.json b/.changes/1.31.0.json new file mode 100644 index 0000000000..08e708884d --- /dev/null +++ b/.changes/1.31.0.json @@ -0,0 +1,42 @@ +[ + { + "category": "``ec2``", + "description": "Add Nitro Enclaves support on DescribeInstanceTypes", + "type": "api-change" + }, + { + "category": "``location``", + "description": "This release adds support for authenticating with Amazon Location Service's Places & Routes APIs with an API Key. Also, with this release developers can publish tracked device position updates to Amazon EventBridge.", + "type": "api-change" + }, + { + "category": "``outposts``", + "description": "Added paginator support to several APIs. Added the ISOLATED enum value to AssetState.", + "type": "api-change" + }, + { + "category": "``quicksight``", + "description": "This release includes below three changes: small multiples axes improvement, field based coloring, removed required trait from Aggregation function for TopBottomFilter.", + "type": "api-change" + }, + { + "category": "``rds``", + "description": "Updates Amazon RDS documentation for creating DB instances and creating Aurora global clusters.", + "type": "api-change" + }, + { + "category": "configprovider", + "description": "Fix bug when deep copying config value store where overrides were not preserved", + "type": "bugfix" + }, + { + "category": "configprovider", + "description": "Always use shallow copy of session config value store for clients", + "type": "enhancement" + }, + { + "category": "configuration", + "description": "Configure the endpoint URL in the shared configuration file or via an environment variable for a specific AWS service or all AWS services.", + "type": "feature" + } +] \ No newline at end of file diff --git a/CHANGELOG.rst b/CHANGELOG.rst index efe51b572d..0e980c471d 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,19 @@ CHANGELOG ========= +1.31.0 +====== + +* api-change:``ec2``: Add Nitro Enclaves support on DescribeInstanceTypes 
+* api-change:``location``: This release adds support for authenticating with Amazon Location Service's Places & Routes APIs with an API Key. Also, with this release developers can publish tracked device position updates to Amazon EventBridge. +* api-change:``outposts``: Added paginator support to several APIs. Added the ISOLATED enum value to AssetState. +* api-change:``quicksight``: This release includes below three changes: small multiples axes improvement, field based coloring, removed required trait from Aggregation function for TopBottomFilter. +* api-change:``rds``: Updates Amazon RDS documentation for creating DB instances and creating Aurora global clusters. +* bugfix:configprovider: Fix bug when deep copying config value store where overrides were not preserved +* enhancement:configprovider: Always use shallow copy of session config value store for clients +* feature:configuration: Configure the endpoint URL in the shared configuration file or via an environment variable for a specific AWS service or all AWS services. 
+ + 1.30.1 ====== diff --git a/botocore/__init__.py b/botocore/__init__.py index aa346e87f3..a8a3ed4468 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.30.1' +__version__ = '1.31.0' class NullHandler(logging.Handler): diff --git a/botocore/args.py b/botocore/args.py index 7f4766bf76..73c8ab45e0 100644 --- a/botocore/args.py +++ b/botocore/args.py @@ -115,7 +115,7 @@ def get_client_args( s3_config = final_args['s3_config'] partition = endpoint_config['metadata'].get('partition', None) socket_options = final_args['socket_options'] - + configured_endpoint_url = final_args['configured_endpoint_url'] signing_region = endpoint_config['signing_region'] endpoint_region_name = endpoint_config['region_name'] @@ -160,7 +160,7 @@ def get_client_args( service_model, endpoint_region_name, region_name, - endpoint_url, + configured_endpoint_url, endpoint, is_secure, endpoint_bridge, @@ -210,10 +210,16 @@ def compute_client_args( parameter_validation = ensure_boolean(raw_value) s3_config = self.compute_s3_config(client_config) + + configured_endpoint_url = self._compute_configured_endpoint_url( + client_config=client_config, + endpoint_url=endpoint_url, + ) + endpoint_config = self._compute_endpoint_config( service_name=service_name, region_name=region_name, - endpoint_url=endpoint_url, + endpoint_url=configured_endpoint_url, is_secure=is_secure, endpoint_bridge=endpoint_bridge, s3_config=s3_config, @@ -270,6 +276,7 @@ def compute_client_args( return { 'service_name': service_name, 'parameter_validation': parameter_validation, + 'configured_endpoint_url': configured_endpoint_url, 'endpoint_config': endpoint_config, 'protocol': protocol, 'config_kwargs': config_kwargs, @@ -279,6 +286,27 @@ def compute_client_args( ), } + def _compute_configured_endpoint_url(self, client_config, endpoint_url): + if endpoint_url is not None: + return endpoint_url + + if self._ignore_configured_endpoint_urls(client_config): + 
logger.debug("Ignoring configured endpoint URLs.") + return endpoint_url + + return self._config_store.get_config_variable('endpoint_url') + + def _ignore_configured_endpoint_urls(self, client_config): + if ( + client_config + and client_config.ignore_configured_endpoint_urls is not None + ): + return client_config.ignore_configured_endpoint_urls + + return self._config_store.get_config_variable( + 'ignore_configured_endpoint_urls' + ) + def compute_s3_config(self, client_config): s3_configuration = self._config_store.get_config_variable('s3') diff --git a/botocore/config.py b/botocore/config.py index 6ce25f8b60..be3a475fa7 100644 --- a/botocore/config.py +++ b/botocore/config.py @@ -194,6 +194,13 @@ class Config: Defaults to None. + :type ignore_configured_endpoint_urls: bool + :param ignore_configured_endpoint_urls: Setting to True disables use + of endpoint URLs provided via environment variables and + the shared configuration file. + + Defaults to None. + :type tcp_keepalive: bool :param tcp_keepalive: Enables the TCP Keep-Alive socket option used when creating new connections if set to True. @@ -221,6 +228,7 @@ class Config: ('endpoint_discovery_enabled', None), ('use_dualstack_endpoint', None), ('use_fips_endpoint', None), + ('ignore_configured_endpoint_urls', None), ('defaults_mode', None), ('tcp_keepalive', None), ] diff --git a/botocore/configloader.py b/botocore/configloader.py index 245d9d8eb7..0b6c82bcad 100644 --- a/botocore/configloader.py +++ b/botocore/configloader.py @@ -200,6 +200,17 @@ def _parse_nested(config_value): return parsed +def _parse_section(key, values): + result = {} + try: + parts = shlex.split(key) + except ValueError: + return result + if len(parts) == 2: + result[parts[1]] = values + return result + + def build_profile_map(parsed_ini_config): """Convert the parsed INI config into a profile map. 
@@ -254,22 +265,15 @@ def build_profile_map(parsed_ini_config): parsed_config = copy.deepcopy(parsed_ini_config) profiles = {} sso_sessions = {} + services = {} final_config = {} for key, values in parsed_config.items(): if key.startswith("profile"): - try: - parts = shlex.split(key) - except ValueError: - continue - if len(parts) == 2: - profiles[parts[1]] = values + profiles.update(_parse_section(key, values)) elif key.startswith("sso-session"): - try: - parts = shlex.split(key) - except ValueError: - continue - if len(parts) == 2: - sso_sessions[parts[1]] = values + sso_sessions.update(_parse_section(key, values)) + elif key.startswith("services"): + services.update(_parse_section(key, values)) elif key == 'default': # default section is special and is considered a profile # name but we don't require you use 'profile "default"' @@ -279,4 +283,5 @@ def build_profile_map(parsed_ini_config): final_config[key] = values final_config['profiles'] = profiles final_config['sso_sessions'] = sso_sessions + final_config['services'] = services return final_config diff --git a/botocore/configprovider.py b/botocore/configprovider.py index e2ebd5efe3..d7b2e19de8 100644 --- a/botocore/configprovider.py +++ b/botocore/configprovider.py @@ -18,6 +18,7 @@ import os from botocore import utils +from botocore.exceptions import InvalidConfigError logger = logging.getLogger(__name__) @@ -108,6 +109,12 @@ None, utils.ensure_boolean, ), + 'ignore_configured_endpoint_urls': ( + 'ignore_configured_endpoint_urls', + 'AWS_IGNORE_CONFIGURED_ENDPOINT_URLS', + None, + utils.ensure_boolean, + ), 'parameter_validation': ('parameter_validation', None, True, None), # Client side monitoring configurations. # Note: These configurations are considered internal to botocore. 
@@ -403,7 +410,18 @@ def __init__(self, mapping=None): self.set_config_provider(logical_name, provider) def __deepcopy__(self, memo): - return ConfigValueStore(copy.deepcopy(self._mapping, memo)) + config_store = ConfigValueStore(copy.deepcopy(self._mapping, memo)) + for logical_name, override_value in self._overrides.items(): + config_store.set_config_variable(logical_name, override_value) + + return config_store + + def __copy__(self): + config_store = ConfigValueStore(copy.copy(self._mapping)) + for logical_name, override_value in self._overrides.items(): + config_store.set_config_variable(logical_name, override_value) + + return config_store def get_config_variable(self, logical_name): """ @@ -543,24 +561,28 @@ def resolve_auto_mode(self, region_name): return 'standard' def _update_provider(self, config_store, variable, value): - provider = config_store.get_config_provider(variable) + original_provider = config_store.get_config_provider(variable) default_provider = ConstantProvider(value) - if isinstance(provider, ChainProvider): - provider.set_default_provider(default_provider) - return - elif isinstance(provider, BaseProvider): + if isinstance(original_provider, ChainProvider): + chain_provider_copy = copy.deepcopy(original_provider) + chain_provider_copy.set_default_provider(default_provider) + default_provider = chain_provider_copy + elif isinstance(original_provider, BaseProvider): default_provider = ChainProvider( - providers=[provider, default_provider] + providers=[original_provider, default_provider] ) config_store.set_config_provider(variable, default_provider) def _update_section_provider( self, config_store, section_name, variable, value ): - section_provider = config_store.get_config_provider(section_name) - section_provider.set_default_provider( + section_provider_copy = copy.deepcopy( + config_store.get_config_provider(section_name) + ) + section_provider_copy.set_default_provider( variable, ConstantProvider(value) ) + 
config_store.set_config_provider(section_name, section_provider_copy) def _set_retryMode(self, config_store, value): self._update_provider(config_store, 'retry_mode', value) @@ -837,3 +859,142 @@ def provide(self): def __repr__(self): return 'ConstantProvider(value=%s)' % self._value + + +class ConfiguredEndpointProvider(BaseProvider): + """Lookup an endpoint URL from environment variable or shared config file. + + NOTE: This class is considered private and is subject to abrupt breaking + changes or removal without prior announcement. Please do not use it + directly. + """ + + _ENDPOINT_URL_LOOKUP_ORDER = [ + 'environment_service', + 'environment_global', + 'config_service', + 'config_global', + ] + + def __init__( + self, + full_config, + scoped_config, + client_name, + environ=None, + ): + """Initialize a ConfiguredEndpointProviderChain. + + :type full_config: dict + :param full_config: This is the dict representing the full + configuration file. + + :type scoped_config: dict + :param scoped_config: This is the dict representing the configuration + for the current profile for the session. + + :type client_name: str + :param client_name: The name used to instantiate a client using + botocore.session.Session.create_client. + + :type environ: dict + :param environ: A mapping to use for environment variables. If this + is not provided it will default to use os.environ. + """ + self._full_config = full_config + self._scoped_config = scoped_config + self._client_name = client_name + self._transformed_service_id = self._get_snake_case_service_id( + self._client_name + ) + if environ is None: + environ = os.environ + self._environ = environ + + def provide(self): + """Lookup the configured endpoint URL. + + The order is: + + 1. The value provided by a service-specific environment variable. + 2. The value provided by the global endpoint environment variable + (AWS_ENDPOINT_URL). + 3. 
The value provided by a service-specific parameter from a services + definition section in the shared configuration file. + 4. The value provided by the global parameter from a services + definition section in the shared configuration file. + """ + for location in self._ENDPOINT_URL_LOOKUP_ORDER: + logger.debug( + 'Looking for endpoint for %s via: %s', + self._client_name, + location, + ) + + endpoint_url = getattr(self, f'_get_endpoint_url_{location}')() + + if endpoint_url: + logger.info( + 'Found endpoint for %s via: %s.', + self._client_name, + location, + ) + return endpoint_url + + logger.debug('No configured endpoint found.') + return None + + def _get_snake_case_service_id(self, client_name): + # Get the service ID without loading the service data file, accounting + # for any aliases and standardizing the names with hyphens. + client_name = utils.SERVICE_NAME_ALIASES.get(client_name, client_name) + hyphenized_service_id = ( + utils.CLIENT_NAME_TO_HYPHENIZED_SERVICE_ID_OVERRIDES.get( + client_name, client_name + ) + ) + return hyphenized_service_id.replace('-', '_') + + def _get_service_env_var_name(self): + transformed_service_id_env = self._transformed_service_id.upper() + return f'AWS_ENDPOINT_URL_{transformed_service_id_env}' + + def _get_services_config(self): + if 'services' not in self._scoped_config: + return {} + + section_name = self._scoped_config['services'] + services_section = self._full_config.get('services', {}).get( + section_name + ) + + if not services_section: + error_msg = ( + f'The profile is configured to use the services ' + f'section but the "{section_name}" services ' + f'configuration does not exist.' 
+ ) + raise InvalidConfigError(error_msg=error_msg) + + return services_section + + def _get_endpoint_url_config_service(self): + snakecase_service_id = self._transformed_service_id.lower() + return ( + self._get_services_config() + .get(snakecase_service_id, {}) + .get('endpoint_url') + ) + + def _get_endpoint_url_config_global(self): + return self._scoped_config.get('endpoint_url') + + def _get_endpoint_url_environment_service(self): + return EnvironmentProvider( + name=self._get_service_env_var_name(), env=self._environ + ).provide() + + def _get_endpoint_url_environment_global(self): + return EnvironmentProvider( + name='AWS_ENDPOINT_URL', env=self._environ + ).provide() diff --git a/botocore/data/ec2/2016-11-15/service-2.json b/botocore/data/ec2/2016-11-15/service-2.json index e3c1e8bb59..3a860699dc 100644 --- a/botocore/data/ec2/2016-11-15/service-2.json +++ b/botocore/data/ec2/2016-11-15/service-2.json @@ -5805,7 +5805,7 @@ }, "input":{"shape":"StopInstancesRequest"}, "output":{"shape":"StopInstancesResult"}, - "documentation":"
Stops an Amazon EBS-backed instance. For more information, see Stop and start your instance in the Amazon EC2 User Guide.
You can use the Stop action to hibernate an instance if the instance is enabled for hibernation and it meets the hibernation prerequisites. For more information, see Hibernate your instance in the Amazon EC2 User Guide.
We don't charge usage for a stopped instance, or data transfer fees; however, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. Every time you start your instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage.
You can't stop or hibernate instance store-backed instances. You can't use the Stop action to hibernate Spot Instances, but you can specify that Amazon EC2 should hibernate Spot Instances when they are interrupted. For more information, see Hibernating interrupted Spot Instances in the Amazon EC2 User Guide.
When you stop or hibernate an instance, we shut it down. You can restart your instance at any time. Before stopping or hibernating an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM, but hibernating an instance does preserve data stored in RAM. If an instance cannot hibernate successfully, a normal shutdown occurs.
Stopping and hibernating an instance is different to rebooting or terminating it. For example, when you stop or hibernate an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, the root device and any other devices attached during the instance launch are automatically deleted. For more information about the differences between rebooting, stopping, hibernating, and terminating instances, see Instance lifecycle in the Amazon EC2 User Guide.
When you stop an instance, we attempt to shut it down forcibly after a short while. If your instance appears stuck in the stopping state after a period of time, there may be an issue with the underlying host computer. For more information, see Troubleshoot stopping your instance in the Amazon EC2 User Guide.
" + "documentation":"Stops an Amazon EBS-backed instance. For more information, see Stop and start your instance in the Amazon EC2 User Guide.
You can use the Stop action to hibernate an instance if the instance is enabled for hibernation and it meets the hibernation prerequisites. For more information, see Hibernate your instance in the Amazon EC2 User Guide.
We don't charge usage for a stopped instance, or data transfer fees; however, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. Every time you start your instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage.
You can't stop or hibernate instance store-backed instances. You can't use the Stop action to hibernate Spot Instances, but you can specify that Amazon EC2 should hibernate Spot Instances when they are interrupted. For more information, see Hibernating interrupted Spot Instances in the Amazon EC2 User Guide.
When you stop or hibernate an instance, we shut it down. You can restart your instance at any time. Before stopping or hibernating an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM, but hibernating an instance does preserve data stored in RAM. If an instance cannot hibernate successfully, a normal shutdown occurs.
Stopping and hibernating an instance is different to rebooting or terminating it. For example, when you stop or hibernate an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, the root device and any other devices attached during the instance launch are automatically deleted. For more information about the differences between rebooting, stopping, hibernating, and terminating instances, see Instance lifecycle in the Amazon EC2 User Guide.
When you stop an instance, we attempt to shut it down forcibly after a short while. If your instance appears stuck in the stopping state after a period of time, there may be an issue with the underlying host computer. For more information, see Troubleshoot stopping your instance in the Amazon EC2 User Guide.
" }, "TerminateClientVpnConnections":{ "name":"TerminateClientVpnConnections", @@ -30918,21 +30918,21 @@ "members":{ "Configured":{ "shape":"Boolean", - "documentation":"If this parameter is set to true
, your instance is enabled for hibernation; otherwise, it is not enabled for hibernation.
If true
, your instance is enabled for hibernation; otherwise, it is not enabled for hibernation.
Indicates whether your instance is configured for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. For more information, see Hibernate your instance in the Amazon EC2 User Guide.
" + "documentation":"Indicates whether your instance is configured for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. For more information, see Hibernate your instance in the Amazon EC2 User Guide.
" }, "HibernationOptionsRequest":{ "type":"structure", "members":{ "Configured":{ "shape":"Boolean", - "documentation":"If you set this parameter to true
, your instance is enabled for hibernation.
Default: false
Set to true
to enable your instance for hibernation.
Default: false
Indicates whether your instance is configured for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. For more information, see Hibernate your instance in the Amazon EC2 User Guide.
" + "documentation":"Indicates whether your instance is configured for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. For more information, see Hibernate your instance in the Amazon EC2 User Guide.
" }, "HistoryRecord":{ "type":"structure", @@ -35406,6 +35406,11 @@ "shape":"BootModeTypeList", "documentation":"The supported boot modes. For more information, see Boot modes in the Amazon EC2 User Guide.
", "locationName":"supportedBootModes" + }, + "NitroEnclavesSupport":{ + "shape":"NitroEnclavesSupport", + "documentation":"Indicates whether Nitro Enclaves is supported.
", + "locationName":"nitroEnclavesSupport" } }, "documentation":"Describes the instance type.
" @@ -43643,6 +43648,13 @@ } }, "NextToken":{"type":"string"}, + "NitroEnclavesSupport":{ + "type":"string", + "enum":[ + "unsupported", + "supported" + ] + }, "OccurrenceDayRequestSet":{ "type":"list", "member":{ @@ -44543,7 +44555,7 @@ }, "PlacementGroupArn":{ "type":"string", - "pattern":"^arn:aws([a-z-]+)?:ec2:[a-z\\d-]+:\\d{12}:placement-group/([^\\s].+[^\\s]){1,255}$" + "pattern":"^arn:aws([a-z-]+)?:ec2:[a-z\\d-]+:\\d{12}:placement-group/^.{1,255}$" }, "PlacementGroupId":{"type":"string"}, "PlacementGroupIdStringList":{ @@ -49124,7 +49136,7 @@ }, "HibernationOptions":{ "shape":"HibernationOptionsRequest", - "documentation":"Indicates whether an instance is enabled for hibernation. For more information, see Hibernate your instance in the Amazon EC2 User Guide.
You can't enable hibernation and Amazon Web Services Nitro Enclaves on the same instance.
" + "documentation":"Indicates whether an instance is enabled for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. For more information, see Hibernate your instance in the Amazon EC2 User Guide.
You can't enable hibernation and Amazon Web Services Nitro Enclaves on the same instance.
" }, "LicenseSpecifications":{ "shape":"LicenseSpecificationListRequest", @@ -49137,7 +49149,7 @@ }, "EnclaveOptions":{ "shape":"EnclaveOptionsRequest", - "documentation":"Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. For more information, see What is Amazon Web Services Nitro Enclaves? in the Amazon Web Services Nitro Enclaves User Guide.
You can't enable Amazon Web Services Nitro Enclaves and hibernation on the same instance.
" + "documentation":"Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. For more information, see What is Amazon Web Services Nitro Enclaves? in the Amazon Web Services Nitro Enclaves User Guide.
You can't enable Amazon Web Services Nitro Enclaves and hibernation on the same instance.
" }, "PrivateDnsNameOptions":{ "shape":"PrivateDnsNameOptionsRequest", @@ -52784,7 +52796,7 @@ "documentation":"The ID of the Client VPN endpoint to which the client is connected.
" }, "ConnectionId":{ - "shape":"VpnConnectionId", + "shape":"String", "documentation":"The ID of the client connection to be terminated.
" }, "Username":{ diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 4f3162af01..7a6715618e 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -10156,12 +10156,14 @@ }, "mediaconnect" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, "eu-west-1" : { }, diff --git a/botocore/data/location/2020-11-19/service-2.json b/botocore/data/location/2020-11-19/service-2.json index e58db3bed5..dd0bb40ca0 100644 --- a/botocore/data/location/2020-11-19/service-2.json +++ b/botocore/data/location/2020-11-19/service-2.json @@ -223,7 +223,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"ThrottlingException"} ], - "documentation":"Creates an API key resource in your Amazon Web Services account, which lets you grant geo:GetMap*
actions for Amazon Location Map resources to the API key bearer.
The API keys feature is in preview. We may add, change, or remove features before announcing general availability. For more information, see Using API keys.
Creates an API key resource in your Amazon Web Services account, which lets you grant actions for Amazon Location resources to the API key bearer.
For more information, see Using API keys.
Retrieves the API key resource details.
The API keys feature is in preview. We may add, change, or remove features before announcing general availability. For more information, see Using API keys.
Retrieves the API key resource details.
", "endpoint":{"hostPrefix":"metadata."} }, "DescribeMap":{ @@ -786,7 +786,7 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"Lists API key resources in your Amazon Web Services account.
The API keys feature is in preview. We may add, change, or remove features before announcing general availability. For more information, see Using API keys.
Lists API key resources in your Amazon Web Services account.
", "endpoint":{"hostPrefix":"metadata."} }, "ListMaps":{ @@ -1051,7 +1051,7 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"Updates the specified properties of a given API key resource.
The API keys feature is in preview. We may add, change, or remove features before announcing general availability. For more information, see Using API keys.
Updates the specified properties of a given API key resource.
", "endpoint":{"hostPrefix":"metadata."}, "idempotent":true }, @@ -1184,7 +1184,7 @@ "members":{ "AllowActions":{ "shape":"ApiKeyRestrictionsAllowActionsList", - "documentation":"A list of allowed actions that an API key resource grants permissions to perform
Currently, the only valid action is geo:GetMap*
as an input to the list. For example, [\"geo:GetMap*\"]
is valid but [\"geo:GetMapTile\"]
is not.
A list of allowed actions that an API key resource grants permissions to perform. You must have at least one action for each type of resource. For example, if you have a place resource, you must include at least one place action.
The following are valid values for the actions.
Map actions
geo:GetMap*
- Allows all actions needed for map rendering.
Place actions
geo:SearchPlaceIndexForText
- Allows geocoding.
geo:SearchPlaceIndexForPosition
- Allows reverse geocoding.
geo:SearchPlaceIndexForSuggestions
- Allows generating suggestions from text.
GetPlace
- Allows finding a place by place ID.
Route actions
geo:CalculateRoute
- Allows point to point routing.
geo:CalculateRouteMatrix
- Allows calculating a matrix of routes.
You must use these strings exactly. For example, to provide access to map rendering, the only valid action is geo:GetMap*
as an input to the list. [\"geo:GetMap*\"]
is valid but [\"geo:GetMapTile\"]
is not. Similarly, you cannot use [\"geo:SearchPlaceIndexFor*\"]
- you must list each of the Place actions separately.
A list of allowed resource ARNs that a API key bearer can perform actions on
For more information about ARN format, see Amazon Resource Names (ARNs).
In this preview, you can allow only map resources.
Requirements:
Must be prefixed with arn
.
partition
and service
must not be empty and should begin with only alphanumeric characters (A–Z, a–z, 0–9) and contain only alphanumeric numbers, hyphens (-) and periods (.).
region
and account-id
can be empty or should begin with only alphanumeric characters (A–Z, a–z, 0–9) and contain only alphanumeric numbers, hyphens (-) and periods (.).
resource-id
can begin with any character except for forward slash (/) and contain any characters after, including forward slashes to form a path.
resource-id
can also include wildcard characters, denoted by an asterisk (*).
arn
, partition
, service
, region
, account-id
and resource-id
must be delimited by a colon (:).
No spaces allowed. For example, arn:aws:geo:region:account-id:map/ExampleMap*
.
A list of allowed resource ARNs that a API key bearer can perform actions on.
The ARN must be the correct ARN for a map, place, or route ARN. You may include wildcards in the resource-id to match multiple resources of the same type.
The resources must be in the same partition
, region
, and account-id
as the key that is being created.
Other than wildcards, you must include the full ARN, including the arn
, partition
, service
, region
, account-id
and resource-id
, delimited by colons (:).
No spaces allowed, even with wildcards. For example, arn:aws:geo:region:account-id:map/ExampleMap*
.
For more information about ARN format, see Amazon Resource Names (ARNs).
" } }, "documentation":"API Restrictions on the allowed actions, resources, and referers for an API key resource.
" @@ -1576,7 +1576,7 @@ }, "GeofenceProperties":{ "shape":"PropertyMap", - "documentation":"Specifies additional user-defined properties to store with the Geofence. An array of key-value pairs.
" + "documentation":"Associates one of more properties with the geofence. A property is a key-value pair stored with the geofence and added to any geofence event triggered with that geofence.
Format: \"key\" : \"value\"
Set the unit system to specify the distance.
Default Value: Kilometers
The optional API key to authorize the request.
", + "location":"querystring", + "locationName":"key" + }, "TravelMode":{ "shape":"TravelMode", "documentation":"Specifies the mode of transport when calculating a route. Used in estimating the speed of travel and road compatibility.
The TravelMode
you specify also determines how you specify route preferences:
If traveling by Car
use the CarModeOptions
parameter.
If traveling by Truck
use the TruckModeOptions
parameter.
Bicycle
or Motorcycle
are only valid when using Grab
as a data provider, and only within Southeast Asia.
Truck
is not available for Grab.
For more information about using Grab as a data provider, see GrabMaps in the Amazon Location Service Developer Guide.
Default Value: Car
Set to include the geometry details in the result for each path between a pair of positions.
Default Value: false
Valid Values: false
| true
The optional API key to authorize the request.
", + "location":"querystring", + "locationName":"key" + }, "TravelMode":{ "shape":"TravelMode", "documentation":"Specifies the mode of transport when calculating a route. Used in estimating the speed of travel and road compatibility. You can choose Car
, Truck
, Walking
, Bicycle
or Motorcycle
as options for the TravelMode
.
Bicycle
and Motorcycle
are only valid when using Grab as a data provider, and only within Southeast Asia.
Truck
is not available for Grab.
For more details on the using Grab for routing, including areas of coverage, see GrabMaps in the Amazon Location Service Developer Guide.
The TravelMode
you specify also determines how you specify route preferences:
If traveling by Car
use the CarModeOptions
parameter.
If traveling by Truck
use the TruckModeOptions
parameter.
Default Value: Car
An optional description for the tracker resource.
" }, + "EventBridgeEnabled":{ + "shape":"Boolean", + "documentation":"Whether to enable position UPDATE
events from this tracker to be sent to EventBridge.
<note>You do not need to enable this feature to get <code>ENTER</code>
and EXIT
events for geofences with this tracker. Those events are always sent to EventBridge.
A key identifier for an Amazon Web Services KMS customer managed key. Enter a key ID, key ARN, alias name, or alias ARN.
" @@ -2848,6 +2864,10 @@ "shape":"ResourceDescription", "documentation":"The optional description for the tracker resource.
" }, + "EventBridgeEnabled":{ + "shape":"Boolean", + "documentation":"Whether UPDATE
events from this tracker in EventBridge are enabled. If set to true
these events will be sent to EventBridge.
A key identifier for an Amazon Web Services KMS customer managed key assigned to the Amazon Location resource.
" @@ -3179,7 +3199,7 @@ }, "GeofenceProperties":{ "shape":"PropertyMap", - "documentation":"Contains additional user-defined properties stored with the geofence. An array of key-value pairs.
" + "documentation":"User defined properties of the geofence. A property is a key-value pair stored with the geofence and added to any geofence event triggered with that geofence.
Format: \"key\" : \"value\"
The optional API key to authorize the request.
", + "location":"querystring", + "locationName":"key" + }, "Language":{ "shape":"LanguageTag", "documentation":"The preferred language used to return results. The value must be a valid BCP 47 language tag, for example, en
for English.
This setting affects the languages used in the results, but not the results themselves. If no language is specified, or not supported for a particular result, the partner automatically chooses a language for the result.
For an example, we'll use the Greek language. You search for a location around Athens, Greece, with the language
parameter set to en
. The city
in the results will most likely be returned as Athens
.
If you set the language
parameter to el
, for Greek, then the city
in the results will more likely be returned as Αθήνα
.
If the data provider does not have a value for Greek, the result will be in a language that the provider does support.
", @@ -3750,7 +3776,7 @@ }, "GeofenceProperties":{ "shape":"PropertyMap", - "documentation":"Contains additional user-defined properties stored with the geofence. An array of key-value pairs.
" + "documentation":"User defined properties of the geofence. A property is a key-value pair stored with the geofence and added to any geofence event triggered with that geofence.
Format: \"key\" : \"value\"
Specifies additional user-defined properties to store with the Geofence. An array of key-value pairs.
" + "documentation":"<p>Associates one or more properties with the geofence. A property is a key-value pair stored with the geofence and added to any geofence event triggered with that geofence.</p> <p>
Format: \"key\" : \"value\"
The optional API key to authorize the request.
", + "location":"querystring", + "locationName":"key" + }, "Language":{ "shape":"LanguageTag", "documentation":"The preferred language used to return results. The value must be a valid BCP 47 language tag, for example, en
for English.
This setting affects the languages used in the results, but not the results themselves. If no language is specified, or not supported for a particular result, the partner automatically chooses a language for the result.
For an example, we'll use the Greek language. You search for a location around Athens, Greece, with the language
parameter set to en
. The city
in the results will most likely be returned as Athens
.
If you set the language
parameter to el
, for Greek, then the city
in the results will more likely be returned as Αθήνα
.
If the data provider does not have a value for Greek, the result will be in a language that the provider does support.
" @@ -4789,6 +4821,12 @@ "location":"uri", "locationName":"IndexName" }, + "Key":{ + "shape":"ApiKey", + "documentation":"The optional API key to authorize the request.
", + "location":"querystring", + "locationName":"key" + }, "Language":{ "shape":"LanguageTag", "documentation":"The preferred language used to return results. The value must be a valid BCP 47 language tag, for example, en
for English.
This setting affects the languages used in the results. If no language is specified, or not supported for a particular result, the partner automatically chooses a language for the result.
For an example, we'll use the Greek language. You search for Athens, Gr
to get suggestions with the language
parameter set to en
. The results found will most likely be returned as Athens, Greece
.
If you set the language
parameter to el
, for Greek, then the result found will more likely be returned as Αθήνα, Ελλάδα
.
If the data provider does not have a value for Greek, the result will be in a language that the provider does support.
" @@ -4903,6 +4941,12 @@ "location":"uri", "locationName":"IndexName" }, + "Key":{ + "shape":"ApiKey", + "documentation":"The optional API key to authorize the request.
", + "location":"querystring", + "locationName":"key" + }, "Language":{ "shape":"LanguageTag", "documentation":"The preferred language used to return results. The value must be a valid BCP 47 language tag, for example, en
for English.
This setting affects the languages used in the results, but not the results themselves. If no language is specified, or not supported for a particular result, the partner automatically chooses a language for the result.
For an example, we'll use the Greek language. You search for Athens, Greece
, with the language
parameter set to en
. The result found will most likely be returned as Athens
.
If you set the language
parameter to el
, for Greek, then the result found will more likely be returned as Αθήνα
.
If the data provider does not have a value for Greek, the result will be in a language that the provider does support.
" @@ -5500,6 +5544,10 @@ "shape":"ResourceDescription", "documentation":"Updates the description for the tracker resource.
" }, + "EventBridgeEnabled":{ + "shape":"Boolean", + "documentation":"Whether to enable position UPDATE
events from this tracker to be sent to EventBridge.
You do not need enable this feature to get ENTER
and EXIT
events for geofences with this tracker. Those events are always sent to EventBridge.
Updates the position filtering for the tracker resource.
Valid values:
TimeBased
- Location updates are evaluated against linked geofence collections, but not every location update is stored. If your update frequency is more often than 30 seconds, only one update per 30 seconds is stored for each unique device ID.
DistanceBased
- If the device has moved less than 30 m (98.4 ft), location updates are ignored. Location updates within this distance are neither evaluated against linked geofence collections, nor stored. This helps control costs by reducing the number of geofence evaluations and historical device positions to paginate through. Distance-based filtering can also reduce the effects of GPS noise when displaying device trajectories on a map.
AccuracyBased
</code> - If the device has moved less than the measured accuracy, location updates are ignored. For example, if two consecutive updates from a device have a horizontal accuracy of 5 m and 10 m, the second update is ignored if the device has moved less than 15 m. Ignored location updates are neither evaluated against linked geofence collections, nor stored. This helps reduce the effects of GPS noise when displaying device trajectories on a map, and can help control costs by reducing the number of geofence evaluations. </p> </li> </ul></p>
The ID of the previous order.
" + "documentation":"The ID of the previous order item.
" } }, "documentation":"Information about a line item.
" @@ -1658,7 +1659,7 @@ }, "OrderType":{ "shape":"OrderType", - "documentation":"The type of order.
" + "documentation":"Type of order.
" } }, "documentation":"Information about an order.
" @@ -1697,7 +1698,7 @@ }, "OrderType":{ "shape":"OrderType", - "documentation":"The type of order.
" + "documentation":"The type of order.
" }, "Status":{ "shape":"OrderStatus", @@ -2083,7 +2084,7 @@ "StatusList":{ "type":"list", "member":{"shape":"AssetState"}, - "max":2, + "max":3, "min":1 }, "String":{ diff --git a/botocore/data/quicksight/2018-04-01/service-2.json b/botocore/data/quicksight/2018-04-01/service-2.json index 704e0bad91..db2b5d68fa 100644 --- a/botocore/data/quicksight/2018-04-01/service-2.json +++ b/botocore/data/quicksight/2018-04-01/service-2.json @@ -2364,7 +2364,7 @@ {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"Starts an Asset Bundle export job.
An Asset Bundle export job exports specified Amazon QuickSight assets. You can also choose to export any asset dependencies in the same job. Export jobs run asynchronously and can be polled with a DescribeAssetBundleExportJob
API call. When a job is successfully completed, a download URL that contains the exported assets is returned. The URL is valid for 5 minutes and can be refreshed with a DescribeAssetBundleExportJob
API call. Each Amazon QuickSight account can run up to 10 export jobs concurrently.
The API caller must have the necessary permissions in their IAM role to access each resource before the resources can be exported.
" + "documentation":"Starts an Asset Bundle export job.
An Asset Bundle export job exports specified Amazon QuickSight assets. You can also choose to export any asset dependencies in the same job. Export jobs run asynchronously and can be polled with a DescribeAssetBundleExportJob
API call. When a job is successfully completed, a download URL that contains the exported assets is returned. The URL is valid for 5 minutes and can be refreshed with a DescribeAssetBundleExportJob
API call. Each Amazon QuickSight account can run up to 5 export jobs concurrently.
The API caller must have the necessary permissions in their IAM role to access each resource before the resources can be exported.
" }, "StartAssetBundleImportJob":{ "name":"StartAssetBundleImportJob", @@ -2383,7 +2383,7 @@ {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"Starts an Asset Bundle import job.
An Asset Bundle import job imports specified Amazon QuickSight assets into an Amazon QuickSight account. You can also choose to import a naming prefix and specified configuration overrides. The assets that are contained in the bundle file that you provide are used to create or update a new or existing asset in your Amazon QuickSight account. Each Amazon QuickSight account can run up to 10 import jobs concurrently.
The API caller must have the necessary \"create\"
, \"describe\"
, and \"update\"
permissions in their IAM role to access each resource type that is contained in the bundle file before the resources can be imported.
Starts an Asset Bundle import job.
An Asset Bundle import job imports specified Amazon QuickSight assets into an Amazon QuickSight account. You can also choose to import a naming prefix and specified configuration overrides. The assets that are contained in the bundle file that you provide are used to create or update a new or existing asset in your Amazon QuickSight account. Each Amazon QuickSight account can run up to 5 import jobs concurrently.
The API caller must have the necessary \"create\"
, \"describe\"
, and \"update\"
permissions in their IAM role to access each resource type that is contained in the bundle file before the resources can be imported.
The Amazon S3 URI for an asset bundle import file that exists in an Amazon S3 bucket that the caller has read access to. The file must be a zip format file and can't exceed 20 MB.
" } }, - "documentation":"The source of the asset bundle zip file that contains the data that you want to import.
" + "documentation":"The source of the asset bundle zip file that contains the data that you want to import. The file must be in QUICKSIGHT_JSON
format.
A list of up to 50 custom colors.
" + } + }, + "documentation":"The color configurations for a column.
" + }, "ColumnConfiguration":{ "type":"structure", "required":["Column"], @@ -5505,6 +5514,10 @@ "Role":{ "shape":"ColumnRole", "documentation":"The role of the column.
" + }, + "ColorsConfiguration":{ + "shape":"ColorsConfiguration", + "documentation":"The color configurations of the column.
" } }, "documentation":"The general configuration of a column.
" @@ -7808,6 +7821,30 @@ }, "documentation":"The URL operation that opens a link to another webpage.
" }, + "CustomColor":{ + "type":"structure", + "required":["Color"], + "members":{ + "FieldValue":{ + "shape":"FieldValue", + "documentation":"The data value that the color is applied to.
" + }, + "Color":{ + "shape":"HexColor", + "documentation":"The color that is applied to the data value.
" + }, + "SpecialValue":{ + "shape":"SpecialValue", + "documentation":"The value of a special data value.
" + } + }, + "documentation":"Determines the color that's applied to a particular data value in a column.
" + }, + "CustomColorsList":{ + "type":"list", + "member":{"shape":"CustomColor"}, + "max":50 + }, "CustomContentConfiguration":{ "type":"structure", "members":{ @@ -11335,7 +11372,7 @@ }, "ExportFormat":{ "shape":"AssetBundleExportFormat", - "documentation":"The format of the export.
" + "documentation":"The format of the exported asset bundle. A QUICKSIGHT_JSON
formatted file can be used to make a StartAssetBundleImportJob
API call. A CLOUDFORMATION_JSON
formatted file can be used in the CloudFormation console and with the CloudFormation APIs.
The display options of a control.
" }, + "SmallMultiplesAxisPlacement":{ + "type":"string", + "enum":[ + "OUTSIDE", + "INSIDE" + ] + }, + "SmallMultiplesAxisProperties":{ + "type":"structure", + "members":{ + "Scale":{ + "shape":"SmallMultiplesAxisScale", + "documentation":"Determines whether scale of the axes are shared or independent. The default value is SHARED
.
Defines the placement of the axis. By default, axes are rendered OUTSIDE
of the panels. Axes with INDEPENDENT
scale are rendered INSIDE
the panels.
Configures the properties of a chart's axes that are used by small multiples panels.
" + }, + "SmallMultiplesAxisScale":{ + "type":"string", + "enum":[ + "SHARED", + "INDEPENDENT" + ] + }, "SmallMultiplesDimensionFieldList":{ "type":"list", "member":{"shape":"DimensionField"}, @@ -23884,6 +23949,14 @@ "PanelConfiguration":{ "shape":"PanelConfiguration", "documentation":"Configures the display options for each small multiples panel.
" + }, + "XAxis":{ + "shape":"SmallMultiplesAxisProperties", + "documentation":"The properties of a small multiples X axis.
" + }, + "YAxis":{ + "shape":"SmallMultiplesAxisProperties", + "documentation":"The properties of a small multiples Y axis.
" } }, "documentation":"Options that determine the layout and display options of a chart's small multiples.
" @@ -23958,6 +24031,14 @@ }, "documentation":"The parameters for Spark.
" }, + "SpecialValue":{ + "type":"string", + "enum":[ + "EMPTY", + "NULL", + "OTHER" + ] + }, "SqlEndpointPath":{ "type":"string", "max":4096, @@ -24080,7 +24161,7 @@ }, "AssetBundleImportSource":{ "shape":"AssetBundleImportSource", - "documentation":"The source of the asset bundle zip file that contains the data that you want to import.
" + "documentation":"The source of the asset bundle zip file that contains the data that you want to import. The file must be in QUICKSIGHT_JSON
format.
Creates an Aurora global database spread across multiple Amazon Web Services Regions. The global database contains a single primary cluster with read-write capability, and a read-only secondary cluster that receives data from the primary cluster through high-speed replication performed by the Aurora storage subsystem.
You can create a global database that is initially empty, and then add a primary cluster and a secondary cluster to it. Or you can specify an existing Aurora cluster during the create operation, and this cluster becomes the primary cluster of the global database.
This action applies only to Aurora DB clusters.
Creates an Aurora global database spread across multiple Amazon Web Services Regions. The global database contains a single primary cluster with read-write capability, and a read-only secondary cluster that receives data from the primary cluster through high-speed replication performed by the Aurora storage subsystem.
You can create a global database that is initially empty, and then add a primary cluster and a secondary cluster to it. Or you can specify an existing Aurora cluster during the create operation, and this cluster becomes the primary cluster of the global database.
This operation applies only to Aurora DB clusters.
The meaning of this parameter differs depending on the database engine.
The name of the database to create when the primary DB instance of the Aurora MySQL DB cluster is created. If you don't specify a value, Amazon RDS doesn't create a database in the DB cluster.
Constraints:
Must contain 1 to 64 alphanumeric characters.
Can't be a word reserved by the database engine.
The name of the database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is created.
Default: postgres
Constraints:
Must contain 1 to 63 alphanumeric characters.
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0 to 9).
Can't be a word reserved by the database engine.
The Oracle System ID (SID) of the created RDS Custom DB instance.
Default: ORCL
Constraints:
Must contain 1 to 8 alphanumeric characters.
Must contain a letter.
Can't be a word reserved by the database engine.
Not applicable. Must be null.
The name of the database to create when the DB instance is created. If you don't specify a value, Amazon RDS doesn't create a database in the DB instance.
Constraints:
Must contain 1 to 64 letters or numbers.
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
Can't be a word reserved by the database engine.
The name of the database to create when the DB instance is created. If you don't specify a value, Amazon RDS doesn't create a database in the DB instance.
Constraints:
Must contain 1 to 64 letters or numbers.
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
Can't be a word reserved by the database engine.
The Oracle System ID (SID) of the created DB instance.
Default: ORCL
Constraints:
Can't be longer than 8 characters.
Can't be a word reserved by the database engine, such as the string NULL
.
The name of the database to create when the DB instance is created.
Default: postgres
Constraints:
Must contain 1 to 63 letters, numbers, or underscores.
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
Can't be a word reserved by the database engine.
Not applicable. Must be null.
The meaning of this parameter differs depending on the database engine.
The name of the database to create when the primary DB instance of the Aurora MySQL DB cluster is created. If you don't specify a value, Amazon RDS doesn't create a database in the DB cluster.
Constraints:
Must contain 1 to 64 alphanumeric characters.
Can't be a word reserved by the database engine.
The name of the database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is created.
Default: postgres
Constraints:
Must contain 1 to 63 alphanumeric characters.
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0 to 9).
Can't be a word reserved by the database engine.
The Oracle System ID (SID) of the created RDS Custom DB instance.
Default: ORCL
Constraints:
Must contain 1 to 8 alphanumeric characters.
Must contain a letter.
Can't be a word reserved by the database engine.
Not applicable. Must be null.
The name of the database to create when the DB instance is created. If you don't specify a value, Amazon RDS doesn't create a database in the DB instance.
Constraints:
Must contain 1 to 64 letters or numbers.
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
Can't be a word reserved by the database engine.
The name of the database to create when the DB instance is created. If you don't specify a value, Amazon RDS doesn't create a database in the DB instance.
Constraints:
Must contain 1 to 64 letters or numbers.
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
Can't be a word reserved by the database engine.
The Oracle System ID (SID) of the created DB instance.
Default: ORCL
Constraints:
Can't be longer than 8 characters.
Can't be a word reserved by the database engine, such as the string NULL
.
The name of the database to create when the DB instance is created.
Default: postgres
Constraints:
Must contain 1 to 63 letters, numbers, or underscores.
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
Can't be a word reserved by the database engine.
Not applicable. Must be null.
The amount of storage in gibibytes (GiB) to allocate for the DB instance.
This setting doesn't apply to Amazon Aurora DB instances. Aurora cluster volumes automatically grow as the amount of data in your database increases, though you are only charged for the space that you use in an Aurora cluster volume.
Constraints to the amount of storage for each storage type are the following:
General Purpose (SSD) storage (gp2, gp3): Must be an integer from 40 to 65536 for RDS Custom for Oracle, 16384 for RDS Custom for SQL Server.
Provisioned IOPS storage (io1): Must be an integer from 40 to 65536 for RDS Custom for Oracle, 16384 for RDS Custom for SQL Server.
Constraints to the amount of storage for each storage type are the following:
General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536.
Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.
Magnetic storage (standard): Must be an integer from 5 to 3072.
Constraints to the amount of storage for each storage type are the following:
General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536.
Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.
Magnetic storage (standard): Must be an integer from 5 to 3072.
Constraints to the amount of storage for each storage type are the following:
General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536.
Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.
Magnetic storage (standard): Must be an integer from 10 to 3072.
Constraints to the amount of storage for each storage type are the following:
General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536.
Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.
Magnetic storage (standard): Must be an integer from 5 to 3072.
Constraints to the amount of storage for each storage type are the following:
General Purpose (SSD) storage (gp2, gp3):
Enterprise and Standard editions: Must be an integer from 20 to 16384.
Web and Express editions: Must be an integer from 20 to 16384.
Provisioned IOPS storage (io1):
Enterprise and Standard editions: Must be an integer from 100 to 16384.
Web and Express editions: Must be an integer from 100 to 16384.
Magnetic storage (standard):
Enterprise and Standard editions: Must be an integer from 20 to 1024.
Web and Express editions: Must be an integer from 20 to 1024.
The amount of storage in gibibytes (GiB) to allocate for the DB instance.
This setting doesn't apply to Amazon Aurora DB instances. Aurora cluster volumes automatically grow as the amount of data in your database increases, though you are only charged for the space that you use in an Aurora cluster volume.
Constraints to the amount of storage for each storage type are the following:
General Purpose (SSD) storage (gp2, gp3): Must be an integer from 40 to 65536 for RDS Custom for Oracle, 16384 for RDS Custom for SQL Server.
Provisioned IOPS storage (io1): Must be an integer from 40 to 65536 for RDS Custom for Oracle, 16384 for RDS Custom for SQL Server.
Constraints to the amount of storage for each storage type are the following:
General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536.
Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.
Magnetic storage (standard): Must be an integer from 5 to 3072.
Constraints to the amount of storage for each storage type are the following:
General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536.
Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.
Magnetic storage (standard): Must be an integer from 5 to 3072.
Constraints to the amount of storage for each storage type are the following:
General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536.
Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.
Magnetic storage (standard): Must be an integer from 10 to 3072.
Constraints to the amount of storage for each storage type are the following:
General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536.
Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.
Magnetic storage (standard): Must be an integer from 5 to 3072.
Constraints to the amount of storage for each storage type are the following:
General Purpose (SSD) storage (gp2, gp3):
Enterprise and Standard editions: Must be an integer from 20 to 16384.
Web and Express editions: Must be an integer from 20 to 16384.
Provisioned IOPS storage (io1):
Enterprise and Standard editions: Must be an integer from 100 to 16384.
Web and Express editions: Must be an integer from 100 to 16384.
Magnetic storage (standard):
Enterprise and Standard editions: Must be an integer from 20 to 1024.
Web and Express editions: Must be an integer from 20 to 1024.
The version number of the database engine to use.
This setting doesn't apply to Amazon Aurora DB instances. The version number of the database engine the DB instance uses is managed by the DB cluster.
For a list of valid engine versions, use the DescribeDBEngineVersions
operation.
The following are the database engines and links to information about the major and minor versions that are available with Amazon RDS. Not every database engine is available for every Amazon Web Services Region.
A custom engine version (CEV) that you have previously created. This setting is required for RDS Custom for Oracle. The CEV name has the following format: 19.customized_string. A valid CEV name is 19.my_cev1
. For more information, see Creating an RDS Custom for Oracle DB instance in the Amazon RDS User Guide.
See RDS Custom for SQL Server general requirements in the Amazon RDS User Guide.
For information, see MariaDB on Amazon RDS versions in the Amazon RDS User Guide.
For information, see Microsoft SQL Server versions on Amazon RDS in the Amazon RDS User Guide.
For information, see MySQL on Amazon RDS versions in the Amazon RDS User Guide.
For information, see Oracle Database Engine release notes in the Amazon RDS User Guide.
For information, see Amazon RDS for PostgreSQL versions and extensions in the Amazon RDS User Guide.
The version number of the database engine to use.
This setting doesn't apply to Amazon Aurora DB instances. The version number of the database engine the DB instance uses is managed by the DB cluster.
For a list of valid engine versions, use the DescribeDBEngineVersions
operation.
The following are the database engines and links to information about the major and minor versions that are available with Amazon RDS. Not every database engine is available for every Amazon Web Services Region.
A custom engine version (CEV) that you have previously created. This setting is required for RDS Custom for Oracle. The CEV name has the following format: 19.customized_string. A valid CEV name is 19.my_cev1
. For more information, see Creating an RDS Custom for Oracle DB instance in the Amazon RDS User Guide.
See RDS Custom for SQL Server general requirements in the Amazon RDS User Guide.
For information, see MariaDB on Amazon RDS versions in the Amazon RDS User Guide.
For information, see Microsoft SQL Server versions on Amazon RDS in the Amazon RDS User Guide.
For information, see MySQL on Amazon RDS versions in the Amazon RDS User Guide.
For information, see Oracle Database Engine release notes in the Amazon RDS User Guide.
For information, see Amazon RDS for PostgreSQL versions and extensions in the Amazon RDS User Guide.
Specifies the fully qualified domain name of an Active Directory domain.
Constraints:
Cannot be greater than 64 characters.
Example: mymanagedADtest.mymanagedAD.mydomain
The fully qualified domain name (FQDN) of an Active Directory domain.
Constraints:
Can't be longer than 64 characters.
Example: mymanagedADtest.mymanagedAD.mydomain
The Active Directory organizational unit for your DB instance to join.
Constraints:
Must be in the distinguished name format.
Cannot be greater than 64 characters.
Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain
The Active Directory organizational unit for your DB instance to join.
Constraints:
Must be in the distinguished name format.
Can't be longer than 64 characters.
Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain
The ARN for the Secrets Manager secret that contains the credentials for the user performing the domain join.
Example: arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456
The ARN for the Secrets Manager secret with the credentials for the user joining the domain.
Example: arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456
Spcifies whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.
This setting doesn't apply to Amazon Aurora DB instances. Copying tags to snapshots is managed by the DB cluster. Setting this value for an Aurora DB instance has no effect on the DB cluster setting.
" + "documentation":"Specifies whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.
This setting doesn't apply to Amazon Aurora DB instances. Copying tags to snapshots is managed by the DB cluster. Setting this value for an Aurora DB instance has no effect on the DB cluster setting.
" }, "MonitoringInterval":{ "shape":"IntegerOptional", @@ -4303,15 +4303,15 @@ }, "DomainFqdn":{ "shape":"String", - "documentation":"Specifies the fully qualified domain name of an Active Directory domain.
Constraints:
Cannot be greater than 64 characters.
Example: mymanagedADtest.mymanagedAD.mydomain
The fully qualified domain name (FQDN) of an Active Directory domain.
Constraints:
Can't be longer than 64 characters.
Example: mymanagedADtest.mymanagedAD.mydomain
The Active Directory organizational unit for your DB instance to join.
Constraints:
Must be in the distinguished name format.
Cannot be greater than 64 characters.
Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain
The Active Directory organizational unit for your DB instance to join.
Constraints:
Must be in the distinguished name format.
Can't be longer than 64 characters.
Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain
The ARN for the Secrets Manager secret that contains the credentials for the user performing the domain join.
Example: arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456
The ARN for the Secrets Manager secret with the credentials for the user joining the domain.
Example: arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456
The cluster identifier of the new global database cluster. This parameter is stored as a lowercase string.
" + "documentation":"The cluster identifier for this global database cluster. This parameter is stored as a lowercase string.
" }, "SourceDBClusterIdentifier":{ "shape":"String", - "documentation":"The Amazon Resource Name (ARN) to use as the primary cluster of the global database. This parameter is optional.
" + "documentation":"The Amazon Resource Name (ARN) to use as the primary cluster of the global database.
If you provide a value for this parameter, don't specify values for the following settings because Amazon Aurora uses the values from the specified source DB cluster:
DatabaseName
Engine
EngineVersion
StorageEncrypted
The name of the database engine to be used for this DB cluster.
" + "documentation":"The database engine to use for this global database cluster.
Valid Values: aurora-mysql | aurora-postgresql
Constraints:
Can't be specified if SourceDBClusterIdentifier
is specified. In this case, Amazon Aurora uses the engine of the source DB cluster.
The engine version of the Aurora global database.
" + "documentation":"The engine version to use for this global database cluster.
Constraints:
Can't be specified if SourceDBClusterIdentifier
is specified. In this case, Amazon Aurora uses the engine version of the source DB cluster.
The deletion protection setting for the new global database. The global database can't be deleted when deletion protection is enabled.
" + "documentation":"Specifies whether to enable deletion protection for the new global database cluster. The global database can't be deleted when deletion protection is enabled.
" }, "DatabaseName":{ "shape":"String", - "documentation":"The name for your database of up to 64 alphanumeric characters. If you do not provide a name, Amazon Aurora will not create a database in the global database cluster you are creating.
" + "documentation":"The name for your database of up to 64 alphanumeric characters. If you don't specify a name, Amazon Aurora doesn't create a database in the global database cluster.
Constraints:
Can't be specified if SourceDBClusterIdentifier
is specified. In this case, Amazon Aurora uses the database name from the source DB cluster.
The storage encryption setting for the new global database cluster.
" + "documentation":"Specifies whether to enable storage encryption for the new global database cluster.
Constraints:
Can't be specified if SourceDBClusterIdentifier
is specified. In this case, Amazon Aurora uses the setting from the source DB cluster.
The status of the Active Directory Domain membership for the DB instance or cluster. Values include joined, pending-join, failed, and so on.
" + "documentation":"The status of the Active Directory Domain membership for the DB instance or cluster. Values include joined
, pending-join
, failed
, and so on.
The name of the IAM role to be used when making API calls to the Directory Service.
" + "documentation":"The name of the IAM role used when making API calls to the Directory Service.
" }, "OU":{ "shape":"String", - "documentation":"The Active Directory organizational unit for your DB instance to join.
" + "documentation":"The Active Directory organizational unit for the DB instance or cluster.
" }, "AuthSecretArn":{ "shape":"String", - "documentation":"The ARN for the Secrets Manager secret that contains the credentials for the user performing the domain join.
" + "documentation":"The ARN for the Secrets Manager secret with the credentials for the user that's a member of the domain.
" }, "DnsIps":{ "shape":"StringList", - "documentation":"The IPv4 DNS IP addresses of your primary and secondary Active Directory domain controllers.
" + "documentation":"The IPv4 DNS IP addresses of the primary and secondary Active Directory domain controllers.
" } }, "documentation":"An Active Directory Domain membership record associated with the DB instance or cluster.
" @@ -10672,15 +10672,15 @@ }, "DomainFqdn":{ "shape":"String", - "documentation":"Specifies the fully qualified domain name of an Active Directory domain.
Constraints:
Cannot be greater than 64 characters.
Example: mymanagedADtest.mymanagedAD.mydomain
The fully qualified domain name (FQDN) of an Active Directory domain.
Constraints:
Can't be longer than 64 characters.
Example: mymanagedADtest.mymanagedAD.mydomain
The Active Directory organizational unit for your DB instance to join.
Constraints:
Must be in the distinguished name format.
Cannot be greater than 64 characters.
Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain
The Active Directory organizational unit for your DB instance to join.
Constraints:
Must be in the distinguished name format.
Can't be longer than 64 characters.
Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain
The ARN for the Secrets Manager secret that contains the credentials for the user performing the domain join.
Example: arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456
The ARN for the Secrets Manager secret with the credentials for the user joining the domain.
Example: arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456
Boolean. If present, removes the instance from the Active Directory domain.
" + "documentation":"Specifies whether to remove the DB instance from the Active Directory domain.
" }, "PromotionTier":{ "shape":"IntegerOptional", @@ -13087,15 +13087,15 @@ }, "DomainFqdn":{ "shape":"String", - "documentation":"Specifies the fully qualified domain name of an Active Directory domain.
Constraints:
Cannot be greater than 64 characters.
Example: mymanagedADtest.mymanagedAD.mydomain
The fully qualified domain name (FQDN) of an Active Directory domain.
Constraints:
Can't be longer than 64 characters.
Example: mymanagedADtest.mymanagedAD.mydomain
The Active Directory organizational unit for your DB instance to join.
Constraints:
Must be in the distinguished name format.
Cannot be greater than 64 characters.
Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain
The Active Directory organizational unit for your DB instance to join.
Constraints:
Must be in the distinguished name format.
Can't be longer than 64 characters.
Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain
The ARN for the Secrets Manager secret that contains the credentials for the user performing the domain join.
Constraints:
Example: arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456
The ARN for the Secrets Manager secret with the credentials for the user joining the domain.
Constraints:
Example: arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456
Specify the name of the IAM role to be used when making API calls to the Directory Service.
This setting doesn't apply to RDS Custom.
" + "documentation":"The name of the IAM role to use when making API calls to the Directory Service.
This setting doesn't apply to RDS Custom DB instances.
" }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", @@ -13477,19 +13477,19 @@ }, "DomainIAMRoleName":{ "shape":"String", - "documentation":"Specify the name of the IAM role to be used when making API calls to the Directory Service.
This setting doesn't apply to RDS Custom.
" + "documentation":"The name of the IAM role to use when making API calls to the Directory Service.
This setting doesn't apply to RDS Custom DB instances.
" }, "DomainFqdn":{ "shape":"String", - "documentation":"Specifies the fully qualified domain name of an Active Directory domain.
Constraints:
Cannot be greater than 64 characters.
Example: mymanagedADtest.mymanagedAD.mydomain
The fully qualified domain name (FQDN) of an Active Directory domain.
Constraints:
Can't be longer than 64 characters.
Example: mymanagedADtest.mymanagedAD.mydomain
The Active Directory organizational unit for your DB instance to join.
Constraints:
Must be in the distinguished name format.
Cannot be greater than 64 characters.
Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain
The Active Directory organizational unit for your DB instance to join.
Constraints:
Must be in the distinguished name format.
Can't be longer than 64 characters.
Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain
The ARN for the Secrets Manager secret that contains the credentials for the user performing the domain join.
Constraints:
Cannot be greater than 64 characters.
Example: arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456
The ARN for the Secrets Manager secret with the credentials for the user joining the domain.
Constraints:
Can't be longer than 64 characters.
Example: arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456
FOR TESTING ONLY: The STANDARD mode provides the latest recommended default values that should be safe to run in most scenarios
Note that the default values vended from this mode might change as best practices may evolve. As a result, it is encouraged to perform tests when upgrading the SDK
", + "in-region": "FOR TESTING ONLY: The IN_REGION mode builds on the standard mode and includes optimization tailored for applications which call AWS services from within the same AWS region
Note that the default values vended from this mode might change as best practices may evolve. As a result, it is encouraged to perform tests when upgrading the SDK
", + "cross-region": "FOR TESTING ONLY: The CROSS_REGION mode builds on the standard mode and includes optimization tailored for applications which call AWS services in a different region
Note that the default values vended from this mode might change as best practices may evolve. As a result, it is encouraged to perform tests when upgrading the SDK
", + "mobile": "FOR TESTING ONLY: The MOBILE mode builds on the standard mode and includes optimization tailored for mobile applications
Note that the default values vended from this mode might change as best practices may evolve. As a result, it is encouraged to perform tests when upgrading the SDK
", + "auto": "FOR TESTING ONLY: The AUTO mode is an experimental mode that builds on the standard mode. The SDK will attempt to discover the execution environment to determine the appropriate settings automatically.
Note that the auto detection is heuristics-based and does not guarantee 100% accuracy. STANDARD mode will be used if the execution environment cannot be determined. The auto detection might query EC2 Instance Metadata service, which might introduce latency. Therefore we recommend choosing an explicit defaults_mode instead if startup latency is critical to your application
", + "legacy": "FOR TESTING ONLY: The LEGACY mode provides default settings that vary per SDK and were used prior to establishment of defaults_mode
" + }, + "configuration": { + "retryMode": "FOR TESTING ONLY: A retry mode specifies how the SDK attempts retries. See Retry Mode
", + "stsRegionalEndpoints": "FOR TESTING ONLY: Specifies how the SDK determines the AWS service endpoint that it uses to talk to the AWS Security Token Service (AWS STS). See Setting STS Regional endpoints
", + "s3UsEast1RegionalEndpoints": "FOR TESTING ONLY: Specifies how the SDK determines the AWS service endpoint that it uses to talk to the Amazon S3 for the us-east-1 region
", + "connectTimeoutInMillis": "FOR TESTING ONLY: The amount of time after making an initial connection attempt on a socket, where if the client does not receive a completion of the connect handshake, the client gives up and fails the operation
", + "tlsNegotiationTimeoutInMillis": "FOR TESTING ONLY: The maximum amount of time that a TLS handshake is allowed to take from the time the CLIENT HELLO message is sent to ethe time the client and server have fully negotiated ciphers and exchanged keys
" + } + } +} diff --git a/tests/functional/test_config_provider.py b/tests/functional/test_config_provider.py index 0206286919..57b3fd4457 100644 --- a/tests/functional/test_config_provider.py +++ b/tests/functional/test_config_provider.py @@ -10,8 +10,11 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. +from pathlib import Path + import pytest +import botocore.exceptions from botocore.config import Config from botocore.session import get_session @@ -28,6 +31,13 @@ sdk_default_configuration = loader.load_data('sdk-default-configuration') +def assert_client_uses_standard_defaults(client): + assert client.meta.config.s3['us_east_1_regional_endpoint'] == 'regional' + assert client.meta.config.connect_timeout == 3.1 + assert client.meta.endpoint_url == 'https://sts.us-west-2.amazonaws.com' + assert client.meta.config.retries['mode'] == 'standard' + + @pytest.mark.parametrize("mode", sdk_default_configuration['base']) def test_no_new_sdk_default_configuration_values(mode): err_msg = ( @@ -45,7 +55,69 @@ def test_default_configurations_resolve_correctly(): client = session.create_client( 'sts', config=config, region_name='us-west-2' ) + assert_client_uses_standard_defaults(client) + + +@pytest.fixture +def loader(): + test_models_dir = Path(__file__).parent / 'models' + loader = botocore.loaders.Loader() + loader.search_paths.insert(0, test_models_dir) + return loader + + +@pytest.fixture +def session(loader): + session = botocore.session.Session() + session.register_component('data_loader', loader) + return session + + +def assert_client_uses_legacy_defaults(client): + assert client.meta.config.s3 is None + assert client.meta.config.connect_timeout == 60 + assert client.meta.endpoint_url == 'https://sts.amazonaws.com' + assert client.meta.config.retries['mode'] == 'legacy' + + +def 
assert_client_uses_testing_defaults(client): assert client.meta.config.s3['us_east_1_regional_endpoint'] == 'regional' - assert client.meta.config.connect_timeout == 3.1 - assert client.meta.endpoint_url == 'https://sts.us-west-2.amazonaws.com' + assert client.meta.config.connect_timeout == 9999 + assert client.meta.endpoint_url == 'https://sts.amazonaws.com' assert client.meta.config.retries['mode'] == 'standard' + + +class TestConfigurationDefaults: + def test_defaults_mode_resolved_from_config_store(self, session): + config_store = session.get_component('config_store') + config_store.set_config_variable('defaults_mode', 'standard') + client = session.create_client('sts', 'us-west-2') + assert_client_uses_testing_defaults(client) + + def test_no_mutate_session_provider(self, session): + # Using the standard default mode should change the connect timeout + # on the client, but not the session + standard_client = session.create_client( + 'sts', 'us-west-2', config=Config(defaults_mode='standard') + ) + assert_client_uses_testing_defaults(standard_client) + + # Using the legacy default mode should not change the connect timeout + # on the client or the session. By default the connect timeout for a client + # is 60 seconds, and unset on the session. 
+ legacy_client = session.create_client('sts', 'us-west-2') + assert_client_uses_legacy_defaults(legacy_client) + + def test_defaults_mode_resolved_from_client_config(self, session): + config = Config(defaults_mode='standard') + client = session.create_client('sts', 'us-west-2', config=config) + assert_client_uses_testing_defaults(client) + + def test_defaults_mode_resolved_invalid_mode_exception(self, session): + with pytest.raises(botocore.exceptions.InvalidDefaultsMode): + config = Config(defaults_mode='invalid_default_mode') + session.create_client('sts', 'us-west-2', config=config) + + def test_defaults_mode_resolved_legacy(self, session): + client = session.create_client('sts', 'us-west-2') + assert_client_uses_legacy_defaults(client) diff --git a/tests/functional/test_endpoints.py b/tests/functional/test_endpoints.py index 59ce8f0438..1372f8ff89 100644 --- a/tests/functional/test_endpoints.py +++ b/tests/functional/test_endpoints.py @@ -13,69 +13,7 @@ import pytest from botocore.session import get_session - -SERVICE_RENAMES = { - # Actual service name we use -> Allowed computed service name. 
- 'alexaforbusiness': 'alexa-for-business', - 'apigateway': 'api-gateway', - 'application-autoscaling': 'application-auto-scaling', - 'appmesh': 'app-mesh', - 'autoscaling': 'auto-scaling', - 'autoscaling-plans': 'auto-scaling-plans', - 'ce': 'cost-explorer', - 'cloudhsmv2': 'cloudhsm-v2', - 'cloudsearchdomain': 'cloudsearch-domain', - 'cognito-idp': 'cognito-identity-provider', - 'config': 'config-service', - 'cur': 'cost-and-usage-report-service', - 'datapipeline': 'data-pipeline', - 'directconnect': 'direct-connect', - 'devicefarm': 'device-farm', - 'discovery': 'application-discovery-service', - 'dms': 'database-migration-service', - 'ds': 'directory-service', - 'dynamodbstreams': 'dynamodb-streams', - 'elasticbeanstalk': 'elastic-beanstalk', - 'elastictranscoder': 'elastic-transcoder', - 'elb': 'elastic-load-balancing', - 'elbv2': 'elastic-load-balancing-v2', - 'es': 'elasticsearch-service', - 'events': 'eventbridge', - 'globalaccelerator': 'global-accelerator', - 'iot-data': 'iot-data-plane', - 'iot-jobs-data': 'iot-jobs-data-plane', - 'iot1click-devices': 'iot-1click-devices-service', - 'iot1click-projects': 'iot-1click-projects', - 'iotevents-data': 'iot-events-data', - 'iotevents': 'iot-events', - 'iotwireless': 'iot-wireless', - 'kinesisanalytics': 'kinesis-analytics', - 'kinesisanalyticsv2': 'kinesis-analytics-v2', - 'kinesisvideo': 'kinesis-video', - 'lex-models': 'lex-model-building-service', - 'lexv2-models': 'lex-models-v2', - 'lex-runtime': 'lex-runtime-service', - 'lexv2-runtime': 'lex-runtime-v2', - 'logs': 'cloudwatch-logs', - 'machinelearning': 'machine-learning', - 'marketplacecommerceanalytics': 'marketplace-commerce-analytics', - 'marketplace-entitlement': 'marketplace-entitlement-service', - 'meteringmarketplace': 'marketplace-metering', - 'mgh': 'migration-hub', - 'sms-voice': 'pinpoint-sms-voice', - 'resourcegroupstaggingapi': 'resource-groups-tagging-api', - 'route53': 'route-53', - 'route53domains': 'route-53-domains', - 's3control': 
's3-control', - 'sdb': 'simpledb', - 'secretsmanager': 'secrets-manager', - 'serverlessrepo': 'serverlessapplicationrepository', - 'servicecatalog': 'service-catalog', - 'servicecatalog-appregistry': 'service-catalog-appregistry', - 'stepfunctions': 'sfn', - 'storagegateway': 'storage-gateway', -} - +from botocore.utils import CLIENT_NAME_TO_HYPHENIZED_SERVICE_ID_OVERRIDES ENDPOINT_PREFIX_OVERRIDE = { # entry in endpoints.json -> actual endpoint prefix. @@ -163,7 +101,7 @@ def test_endpoint_matches_service(endpoint_prefix): @pytest.mark.parametrize("service_name", AVAILABLE_SERVICES) -def test_service_name_matches_endpoint_prefix(service_name): +def test_client_name_matches_hyphenized_service_id(service_name): """Generates tests for each service to verify that the computed service named based on the service id matches the service name used to create a client (i.e the directory name in botocore/data) @@ -174,7 +112,9 @@ def test_service_name_matches_endpoint_prefix(service_name): # Handle known exceptions where we have renamed the service directory # for one reason or another. 
- actual_service_name = SERVICE_RENAMES.get(service_name, service_name) + actual_service_name = CLIENT_NAME_TO_HYPHENIZED_SERVICE_ID_OVERRIDES.get( + service_name, service_name + ) err_msg = ( f"Actual service name `{actual_service_name}` does not match " diff --git a/tests/unit/cfg/aws_services_config b/tests/unit/cfg/aws_services_config new file mode 100644 index 0000000000..cf9040c2a4 --- /dev/null +++ b/tests/unit/cfg/aws_services_config @@ -0,0 +1,9 @@ +[default] +endpoint_url = https://localhost:1234/ +services = my-services + +[services my-services] +s3 = + endpoint_url = https://localhost:5678/ +dynamodb = + endpoint_url = https://localhost:8888/ diff --git a/tests/unit/test_config_provider.py b/tests/unit/test_config_provider.py index 64760d9d24..4a7d91b0d6 100644 --- a/tests/unit/test_config_provider.py +++ b/tests/unit/test_config_provider.py @@ -21,6 +21,7 @@ BaseProvider, ChainProvider, ConfigChainFactory, + ConfiguredEndpointProvider, ConfigValueStore, ConstantProvider, DefaultConfigResolver, @@ -363,6 +364,60 @@ def test_can_get_config_provider_non_chain_provider(self): self.assertIsInstance(provider, ConstantProvider) self.assertEqual(value, 'bar') + def test_deepcopy_preserves_overrides(self): + provider = ConstantProvider(100) + config_store = ConfigValueStore(mapping={'fake_variable': provider}) + config_store.set_config_variable('fake_variable', 'override-value') + + config_store_deepcopy = copy.deepcopy(config_store) + + value = config_store_deepcopy.get_config_variable('fake_variable') + self.assertEqual(value, 'override-value') + + def test_copy_preserves_provider_identities(self): + fake_variable_provider = ConstantProvider(100) + config_store = ConfigValueStore( + mapping={ + 'fake_variable': fake_variable_provider, + } + ) + + config_store_copy = copy.copy(config_store) + + self.assertIs( + config_store.get_config_provider('fake_variable'), + config_store_copy.get_config_provider('fake_variable'), + ) + + def 
test_copy_preserves_overrides(self): + provider = ConstantProvider(100) + config_store = ConfigValueStore(mapping={'fake_variable': provider}) + config_store.set_config_variable('fake_variable', 'override-value') + + config_store_copy = copy.copy(config_store) + + value = config_store_copy.get_config_variable('fake_variable') + self.assertEqual(value, 'override-value') + + def test_copy_update_does_not_mutate_source_config_store(self): + fake_variable_provider = ConstantProvider(100) + config_store = ConfigValueStore( + mapping={ + 'fake_variable': fake_variable_provider, + } + ) + + config_store_copy = copy.copy(config_store) + + another_variable_provider = ConstantProvider('ABC') + + config_store_copy.set_config_provider( + 'fake_variable', another_variable_provider + ) + + assert config_store.get_config_variable('fake_variable') == 100 + assert config_store_copy.get_config_variable('fake_variable') == 'ABC' + class TestInstanceVarProvider(unittest.TestCase): def assert_provides_value(self, name, instance_map, expected_value): @@ -633,8 +688,8 @@ def fake_session(self): return fake_session def _create_config_value_store(self, s3_mapping={}, **override_kwargs): - provider_foo = ConstantProvider(value='foo') - environment_provider_foo = EnvironmentProvider( + constant_provider = ConstantProvider(value='my_sts_regional_endpoint') + environment_provider = EnvironmentProvider( name='AWS_RETRY_MODE', env={'AWS_RETRY_MODE': None} ) fake_session = mock.Mock(spec=session.Session) @@ -642,8 +697,10 @@ def _create_config_value_store(self, s3_mapping={}, **override_kwargs): # Testing with three different providers to validate # SmartDefaultsConfigStoreFactory._get_new_chain_provider mapping = { - 'sts_regional_endpoints': ChainProvider(providers=[provider_foo]), - 'retry_mode': ChainProvider(providers=[environment_provider_foo]), + 'sts_regional_endpoints': ChainProvider( + providers=[constant_provider] + ), + 'retry_mode': ChainProvider(providers=[environment_provider]), 
's3': SectionConfigProvider('s3', fake_session, s3_mapping), } mapping.update(**override_kwargs) @@ -657,11 +714,68 @@ def _create_os_environ_patcher(self): def test_config_store_deepcopy(self): config_store = ConfigValueStore() - config_store.set_config_provider('foo', ConstantProvider('bar')) + config_store.set_config_provider( + 'constant_value', ConstantProvider('ABC') + ) config_store_copy = copy.deepcopy(config_store) - config_store_copy.set_config_provider('fizz', ConstantProvider('buzz')) - assert config_store.get_config_variable('fizz') is None - assert config_store_copy.get_config_variable('foo') == 'bar' + config_store_copy.set_config_provider( + 'constant_value_copy', ConstantProvider('123') + ) + assert config_store.get_config_variable('constant_value_copy') is None + assert config_store_copy.get_config_variable('constant_value') == 'ABC' + + def _create_config_value_store_to_test_merge(self): + environment_provider = EnvironmentProvider( + name='AWS_S3_US_EAST_1_REGIONAL_ENDPOINT', + env={}, + ) + + s3_mapping = { + 'us_east_1_regional_endpoint': ChainProvider( + providers=[environment_provider] + ) + } + + override_kwargs = {'connect_timeout': ConstantProvider(value=None)} + + config_value_store = self._create_config_value_store( + s3_mapping=s3_mapping, **override_kwargs + ) + + return config_value_store + + @pytest.mark.parametrize( + 'config_variable,expected_value_before,expected_value_after', + [ + ['retry_mode', None, 'standard'], + ['sts_regional_endpoints', 'my_sts_regional_endpoint', 'regional'], + ['connect_timeout', None, 2], + ['s3', None, {'us_east_1_regional_endpoint': 'regional'}], + ], + ) + def test_config_store_providers_not_mutated_after_merge( + self, + config_variable, + expected_value_before, + expected_value_after, + smart_defaults_factory, + ): + """Test uses the standard default mode from the template""" + + config_value_store = self._create_config_value_store_to_test_merge() + + provider = 
config_value_store.get_config_provider(config_variable) + + smart_defaults_factory.merge_smart_defaults( + config_value_store, 'standard', 'some-region' + ) + + assert provider.provide() == expected_value_before + + assert ( + config_value_store.get_config_variable(config_variable) + == expected_value_after + ) @pytest.mark.parametrize( 'defaults_mode, retry_mode, sts_regional_endpoints,' @@ -710,7 +824,7 @@ def test_resolve_default_values_on_config( assert config_store.get_config_variable('connect_timeout') == 2 def test_no_resolve_default_s3_values_on_config( - self, smart_defaults_factory, fake_session + self, smart_defaults_factory ): environment_provider = EnvironmentProvider( name='AWS_S3_US_EAST_1_REGIONAL_ENDPOINT', @@ -829,3 +943,206 @@ def test_resolve_auto_mode_imds_region_provider_connect_timeout(self): ) mode = smart_defaults_factory.resolve_auto_mode('us-west-2') assert mode == 'standard' + + +def create_cases(): + service = 'batch' + + return [ + dict( + service=service, + environ_map={}, + full_config_map={}, + expected_value=None, + ), + dict( + service=service, + environ_map={'AWS_ENDPOINT_URL': 'global-from-env'}, + full_config_map={}, + expected_value='global-from-env', + ), + dict( + service=service, + environ_map={ + f'AWS_ENDPOINT_URL_{service.upper()}': 'service-from-env', + 'AWS_ENDPOINT_URL': 'global-from-env', + }, + full_config_map={}, + expected_value='service-from-env', + ), + dict( + service=service, + environ_map={ + 'AWS_ENDPOINT_URL': 'global-from-env', + 'AWS_ENDPOINT_URL_S3': 's3-endpoint-url', + }, + full_config_map={}, + expected_value='global-from-env', + ), + dict( + service=service, + environ_map={}, + full_config_map={ + 'profiles': {'default': {'endpoint_url': 'global-from-config'}} + }, + expected_value='global-from-config', + ), + dict( + service=service, + environ_map={}, + full_config_map={ + 'profiles': { + 'default': { + 'services': 'my-services', + } + }, + 'services': { + 'my-services': { + service: 
{'endpoint_url': "service-from-config"} + } + }, + }, + expected_value='service-from-config', + ), + dict( + service=service, + environ_map={}, + full_config_map={ + 'profiles': { + 'default': { + 'services': 'my-services', + 'endpoint_url': 'global-from-config', + } + }, + 'services': { + 'my-services': { + service: {'endpoint_url': "service-from-config"} + } + }, + }, + expected_value='service-from-config', + ), + dict( + service=service, + environ_map={ + 'AWS_ENDPOINT_URL': 'global-from-env', + }, + full_config_map={ + 'profiles': { + 'default': { + 'endpoint_url': 'global-from-config', + } + }, + }, + expected_value='global-from-env', + ), + dict( + service=service, + environ_map={ + f'AWS_ENDPOINT_URL_{service.upper()}': 'service-from-env', + }, + full_config_map={ + 'profiles': { + 'default': { + 'endpoint_url': 'global-from-config', + } + }, + }, + expected_value='service-from-env', + ), + dict( + service='s3', + environ_map={}, + full_config_map={ + 'profiles': { + 'default': { + 'services': 'my-services', + 'endpoint_url': 'global-from-config', + } + }, + 'services': { + 'my-services': { + service: {'endpoint_url': "service-from-config"} + } + }, + }, + expected_value='global-from-config', + ), + dict( + service='runtime.sagemaker', + environ_map={}, + full_config_map={ + 'profiles': { + 'default': { + 'services': 'my-services', + } + }, + 'services': { + 'my-services': { + 'sagemaker_runtime': { + 'endpoint_url': "service-from-config" + } + } + }, + }, + expected_value='service-from-config', + ), + dict( + service='apigateway', + environ_map={}, + full_config_map={ + 'profiles': { + 'default': { + 'services': 'my-services', + } + }, + 'services': { + 'my-services': { + 'api_gateway': {'endpoint_url': "service-from-config"} + } + }, + }, + expected_value='service-from-config', + ), + ] + + +class TestConfiguredEndpointProvider: + def assert_does_provide( + self, + service, + environ_map, + full_config_map, + expected_value, + ): + scoped_config_map = 
full_config_map.get('profiles', {}).get( + 'default', {} + ) + + chain = ConfiguredEndpointProvider( + scoped_config=scoped_config_map, + full_config=full_config_map, + client_name=service, + environ=environ_map, + ) + value = chain.provide() + assert value == expected_value + + @pytest.mark.parametrize('test_case', create_cases()) + def test_does_provide(self, test_case): + self.assert_does_provide(**test_case) + + def test_is_deepcopyable(self): + env = {'AWS_ENDPOINT_URL_BATCH': 'https://endpoint-override'} + provider = ConfiguredEndpointProvider( + full_config={}, scoped_config={}, client_name='batch', environ=env + ) + + provider_deepcopy = copy.deepcopy(provider) + assert provider is not provider_deepcopy + assert provider.provide() == 'https://endpoint-override' + assert provider_deepcopy.provide() == 'https://endpoint-override' + + env['AWS_ENDPOINT_URL_BATCH'] = 'https://another-new-endpoint-override' + assert provider.provide() == 'https://another-new-endpoint-override' + assert provider_deepcopy.provide() == 'https://endpoint-override' diff --git a/tests/unit/test_configloader.py b/tests/unit/test_configloader.py index 4a331c9e2d..a6e02f7d4f 100644 --- a/tests/unit/test_configloader.py +++ b/tests/unit/test_configloader.py @@ -188,6 +188,24 @@ def test_sso_session_config(self): self.assertEqual(sso_config['sso_region'], 'us-east-1') self.assertEqual(sso_config['sso_start_url'], 'https://example.com') + def test_services_config(self): + filename = path('aws_services_config') + loaded_config = load_config(filename) + self.assertIn('profiles', loaded_config) + self.assertIn('default', loaded_config['profiles']) + self.assertIn('services', loaded_config) + self.assertIn('my-services', loaded_config['services']) + services_config = loaded_config['services']['my-services'] + self.assertIn('s3', services_config) + self.assertIn('dynamodb', services_config) + self.assertEqual( + services_config['s3']['endpoint_url'], 'https://localhost:5678/' + ) + 
self.assertEqual( + services_config['dynamodb']['endpoint_url'], + 'https://localhost:8888/', + ) + if __name__ == "__main__": unittest.main() diff --git a/tests/unit/test_session.py b/tests/unit/test_session.py index ee4cad5a58..ef2495ebcf 100644 --- a/tests/unit/test_session.py +++ b/tests/unit/test_session.py @@ -772,34 +772,6 @@ def test_param_api_version_overrides_config_value(self, client_creator): ] self.assertEqual(call_kwargs['api_version'], override_api_version) - @mock.patch('botocore.client.ClientCreator') - def test_defaults_mode_resolved_from_config_store(self, client_creator): - config_store = self.session.get_component('config_store') - config_store.set_config_variable('defaults_mode', 'standard') - self.session.create_client('sts', 'us-west-2') - self.assertIsNot(client_creator.call_args[0][-1], config_store) - - @mock.patch('botocore.client.ClientCreator') - def test_defaults_mode_resolved_from_client_config(self, client_creator): - config_store = self.session.get_component('config_store') - config = botocore.config.Config(defaults_mode='standard') - self.session.create_client('sts', 'us-west-2', config=config) - self.assertIsNot(client_creator.call_args[0][-1], config_store) - - @mock.patch('botocore.client.ClientCreator') - def test_defaults_mode_resolved_invalid_mode_exception( - self, client_creator - ): - with self.assertRaises(botocore.exceptions.InvalidDefaultsMode): - config = botocore.config.Config(defaults_mode='foo') - self.session.create_client('sts', 'us-west-2', config=config) - - @mock.patch('botocore.client.ClientCreator') - def test_defaults_mode_resolved_legacy(self, client_creator): - config_store = self.session.get_component('config_store') - self.session.create_client('sts', 'us-west-2') - self.assertIs(client_creator.call_args[0][-1], config_store) - class TestSessionComponent(BaseSessionTest): def test_internal_component(self):