diff --git a/samtranslator/schema/schema.json b/samtranslator/schema/schema.json index d02799d44..56c2245bb 100644 --- a/samtranslator/schema/schema.json +++ b/samtranslator/schema/schema.json @@ -673,7 +673,7 @@ "properties": { "CrlDistributionPointExtensionConfiguration": { "$ref": "#/definitions/AWS::ACMPCA::CertificateAuthority.CrlDistributionPointExtensionConfiguration", - "markdownDescription": "", + "markdownDescription": "Configures the default behavior of the CRL Distribution Point extension for certificates issued by your CA. If this field is not provided, then the CRL Distribution Point extension will be present and contain the default CRL URL.", "title": "CrlDistributionPointExtensionConfiguration" }, "CustomCname": { @@ -708,7 +708,7 @@ "additionalProperties": false, "properties": { "OmitExtension": { - "markdownDescription": "", + "markdownDescription": "Configures whether the CRL Distribution Point extension should be populated with the default URL to the CRL. If set to `true` , then the CDP extension will not be present in any certificates issued by that CA unless otherwise specified through CSR or API passthrough.\n\n> Only set this if you have another way to distribute the CRL Distribution Points for certificates issued by your CA, such as the Matter Distributed Compliance Ledger.\n> \n> This configuration cannot be enabled with a custom CNAME set.", "title": "OmitExtension", "type": "boolean" } @@ -4930,7 +4930,7 @@ "title": "AccessLogSetting" }, "CacheClusterEnabled": { - "markdownDescription": "Specifies whether a cache cluster is enabled for the stage.", + "markdownDescription": "Specifies whether a cache cluster is enabled for the stage. To activate a method-level cache, set `CachingEnabled` to `true` for a method.", "title": "CacheClusterEnabled", "type": "boolean" }, @@ -6268,7 +6268,7 @@ "title": "AccessLogSetting" }, "CacheClusterEnabled": { - "markdownDescription": "Specifies whether a cache cluster is enabled for the stage.", + "markdownDescription": "Specifies whether a cache cluster is enabled for the stage. To activate a method-level cache, set `CachingEnabled` to `true` for a method.", "title": "CacheClusterEnabled", "type": "boolean" }, @@ -9175,13 +9175,9 @@ "additionalProperties": false, "properties": { "AlarmArn": { - "markdownDescription": "Amazon Resource Name (ARN) of the Amazon CloudWatch alarm.", - "title": "AlarmArn", "type": "string" }, "AlarmRoleArn": { - "markdownDescription": "ARN of an AWS Identity and Access Management (IAM) role for AWS AppConfig to monitor `AlarmArn` .", - "title": "AlarmRoleArn", "type": "string" } }, @@ -9191,13 +9187,9 @@ "additionalProperties": false, "properties": { "Key": { - "markdownDescription": "The key-value string map. The valid character set is `[a-zA-Z+-=._:/]` . The tag key can be up to 128 characters and must not start with `aws:` .", - "title": "Key", "type": "string" }, "Value": { - "markdownDescription": "The tag value can be up to 256 characters.", - "title": "Value", "type": "string" } }, @@ -22351,12 +22343,12 @@ "additionalProperties": false, "properties": { "MaxHealthyPercentage": { - "markdownDescription": "Specifies the upper threshold as a percentage of the desired capacity of the Auto Scaling group. It represents the maximum percentage of the group that can be in service and healthy, or pending, to support your workload when replacing instances. Value range is 100 to 200. 
After it's set, a value of `-1` will clear the previously set value.\n\nBoth `MinHealthyPercentage` and `MaxHealthyPercentage` must be specified, and the difference between them cannot be greater than 100. A large range increases the number of instances that can be replaced at the same time.", + "markdownDescription": "Specifies the upper threshold as a percentage of the desired capacity of the Auto Scaling group. It represents the maximum percentage of the group that can be in service and healthy, or pending, to support your workload when replacing instances. Value range is 100 to 200. To clear a previously set value, specify a value of `-1` .\n\nBoth `MinHealthyPercentage` and `MaxHealthyPercentage` must be specified, and the difference between them cannot be greater than 100. A large range increases the number of instances that can be replaced at the same time.", "title": "MaxHealthyPercentage", "type": "number" }, "MinHealthyPercentage": { - "markdownDescription": "Specifies the lower threshold as a percentage of the desired capacity of the Auto Scaling group. It represents the minimum percentage of the group to keep in service, healthy, and ready to use to support your workload when replacing instances. Value range is 0 to 100. After it's set, a value of `-1` will clear the previously set value.", + "markdownDescription": "Specifies the lower threshold as a percentage of the desired capacity of the Auto Scaling group. It represents the minimum percentage of the group to keep in service, healthy, and ready to use to support your workload when replacing instances. Value range is 0 to 100. To clear a previously set value, specify a value of `-1` .", "title": "MinHealthyPercentage", "type": "number" } @@ -22481,7 +22473,7 @@ "title": "NetworkInterfaceCount" }, "OnDemandMaxPricePercentageOverLowestPrice": { - "markdownDescription": "The price protection threshold for On-Demand Instances. This is the maximum you\u2019ll pay for an On-Demand Instance, expressed as a percentage higher than the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as `999999` .\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per vCPU or per memory price instead of the per instance price.\n\nDefault: `20`", + "markdownDescription": "[Price protection] The price protection threshold for On-Demand Instances, as a percentage higher than an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from either the lowest priced current generation instance types or, failing that, the lowest priced previous generation instance types that match your attributes. 
When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per instance price.\n\nDefault: `20`", "title": "OnDemandMaxPricePercentageOverLowestPrice", "type": "number" }, @@ -22491,7 +22483,7 @@ "type": "boolean" }, "SpotMaxPricePercentageOverLowestPrice": { - "markdownDescription": "The price protection threshold for Spot Instances. This is the maximum you\u2019ll pay for a Spot Instance, expressed as a percentage higher than the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as `999999` .\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per vCPU or per memory price instead of the per instance price.\n\nDefault: `100`", + "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from either the lowest priced current generation instance types or, failing that, the lowest priced previous generation instance types that match your attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per-vCPU or per-memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. 
\n\nDefault: `100`", "title": "SpotMaxPricePercentageOverLowestPrice", "type": "number" }, @@ -29545,7 +29537,7 @@ }, "EncryptionSpecification": { "$ref": "#/definitions/AWS::Cassandra::Table.EncryptionSpecification", - "markdownDescription": "The encryption at rest options for the table.\n\n- *AWS owned key* (default) - The key is owned by Amazon Keyspaces.\n- *Customer managed key* - The key is stored in your account and is created, owned, and managed by you.\n\n> If you choose encryption with a customer managed key, you must specify a valid customer managed KMS key with permissions granted to Amazon Keyspaces.\n\nFor more information, see [Encryption at rest in Amazon Keyspaces](https://docs.aws.amazon.com/keyspaces/latest/devguide/EncryptionAtRest.html) in the *Amazon Keyspaces Developer Guide* .", + "markdownDescription": "The encryption at rest options for the table.\n\n- *AWS owned key* (default) - The key is owned by Amazon Keyspaces .\n- *Customer managed key* - The key is stored in your account and is created, owned, and managed by you.\n\n> If you choose encryption with a customer managed key, you must specify a valid customer managed KMS key with permissions granted to Amazon Keyspaces.\n\nFor more information, see [Encryption at rest in Amazon Keyspaces](https://docs.aws.amazon.com/keyspaces/latest/devguide/EncryptionAtRest.html) in the *Amazon Keyspaces Developer Guide* .", "title": "EncryptionSpecification" }, "KeyspaceName": { @@ -33724,7 +33716,7 @@ "items": { "$ref": "#/definitions/AWS::CloudFront::Distribution.FunctionAssociation" }, - "markdownDescription": "A list of CloudFront functions that are associated with this cache behavior. CloudFront functions must be published to the `LIVE` stage to associate them with a cache behavior.", + "markdownDescription": "A list of CloudFront functions that are associated with this cache behavior. Your functions must be published to the `LIVE` stage to associate them with a cache behavior.", "title": "FunctionAssociations", "type": "array" }, @@ -33918,7 +33910,7 @@ "title": "ViewerCertificate" }, "WebACLId": { - "markdownDescription": "A unique identifier that specifies the AWS WAF web ACL, if any, to associate with this distribution. To specify a web ACL created using the latest version of AWS WAF , use the ACL ARN, for example `arn:aws:wafv2:us-east-1:123456789012:global/webacl/ExampleWebACL/473e64fd-f30b-4765-81a0-62ad96dd167a` . To specify a web ACL created using AWS WAF Classic, use the ACL ID, for example `473e64fd-f30b-4765-81a0-62ad96dd167a` .\n\nAWS WAF is a web application firewall that lets you monitor the HTTP and HTTPS requests that are forwarded to CloudFront, and lets you control access to your content. Based on conditions that you specify, such as the IP addresses that requests originate from or the values of query strings, CloudFront responds to requests either with the requested content or with an HTTP 403 status code (Forbidden). You can also configure CloudFront to return a custom error page when a request is blocked. For more information about AWS WAF , see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/what-is-aws-waf.html) .", + "markdownDescription": "A unique identifier that specifies the AWS WAF web ACL, if any, to associate with this distribution. To specify a web ACL created using the latest version of AWS WAF , use the ACL ARN, for example `arn:aws:wafv2:us-east-1:123456789012:global/webacl/ExampleWebACL/a1b2c3d4-5678-90ab-cdef-EXAMPLE11111` . 
To specify a web ACL created using AWS WAF Classic, use the ACL ID, for example `a1b2c3d4-5678-90ab-cdef-EXAMPLE11111` .\n\nAWS WAF is a web application firewall that lets you monitor the HTTP and HTTPS requests that are forwarded to CloudFront, and lets you control access to your content. Based on conditions that you specify, such as the IP addresses that requests originate from or the values of query strings, CloudFront responds to requests either with the requested content or with an HTTP 403 status code (Forbidden). You can also configure CloudFront to return a custom error page when a request is blocked. For more information about AWS WAF , see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/what-is-aws-waf.html) .", "title": "WebACLId", "type": "string" } @@ -34483,7 +34475,7 @@ "items": { "$ref": "#/definitions/AWS::CloudFront::Function.KeyValueStoreAssociation" }, - "markdownDescription": "The configuration for the Key Value Store associations.", + "markdownDescription": "The configuration for the key value store associations.", "title": "KeyValueStoreAssociations", "type": "array" }, @@ -34514,7 +34506,7 @@ "additionalProperties": false, "properties": { "KeyValueStoreARN": { - "markdownDescription": "The Amazon Resource Name (ARN) of the Key Value Store association.", + "markdownDescription": "The Amazon Resource Name (ARN) of the key value store association.", "title": "KeyValueStoreARN", "type": "string" } @@ -34655,17 +34647,17 @@ "additionalProperties": false, "properties": { "Comment": { - "markdownDescription": "A comment for the Key Value Store.", + "markdownDescription": "A comment for the key value store.", "title": "Comment", "type": "string" }, "ImportSource": { "$ref": "#/definitions/AWS::CloudFront::KeyValueStore.ImportSource", - "markdownDescription": "The import source for the Key Value Store.", + "markdownDescription": "The import source for the key value store.", "title": "ImportSource" }, "Name": { - "markdownDescription": "The name of the Key Value Store.", + "markdownDescription": "The name of the key value store.", "title": "Name", "type": "string" } @@ -34700,12 +34692,12 @@ "additionalProperties": false, "properties": { "SourceArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the import source for the Key Value Store.", + "markdownDescription": "The Amazon Resource Name (ARN) of the import source for the key value store.", "title": "SourceArn", "type": "string" }, "SourceType": { - "markdownDescription": "The source type of the import source for the Key Value Store.", + "markdownDescription": "The source type of the import source for the key value store.", "title": "SourceType", "type": "string" } @@ -34890,7 +34882,7 @@ "type": "string" }, "Name": { - "markdownDescription": "A name to identify the origin access control.", + "markdownDescription": "A name to identify the origin access control. 
You can specify up to 64 characters.", "title": "Name", "type": "string" }, @@ -35717,7 +35709,7 @@ }, "StrictTransportSecurity": { "$ref": "#/definitions/AWS::CloudFront::ResponseHeadersPolicy.StrictTransportSecurity", - "markdownDescription": "Determines whether CloudFront includes the `Strict-Transport-Security` HTTP response header and the header's value.\n\nFor more information about the `Strict-Transport-Security` HTTP response header, see [Strict-Transport-Security](https://docs.aws.amazon.com/https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security) in the MDN Web Docs.", + "markdownDescription": "Determines whether CloudFront includes the `Strict-Transport-Security` HTTP response header and the header's value.\n\nFor more information about the `Strict-Transport-Security` HTTP response header, see [Security headers](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/understanding-response-headers-policies.html#understanding-response-headers-policies-security) in the *Amazon CloudFront Developer Guide* and [Strict-Transport-Security](https://docs.aws.amazon.com/https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security) in the MDN Web Docs.", "title": "StrictTransportSecurity" }, "XSSProtection": { @@ -38510,6 +38502,8 @@ "additionalProperties": false, "properties": { "FleetArn": { + "markdownDescription": "Specifies the compute fleet ARN for the build project.", + "title": "FleetArn", "type": "string" } }, @@ -40572,7 +40566,7 @@ "type": "string" }, "PipelineType": { - "markdownDescription": "CodePipeline provides the following pipeline types, which differ in characteristics and price, so that you can tailor your pipeline features and cost to the needs of your applications.\n\n- V1 type pipelines have a JSON structure that contains standard pipeline, stage, and action-level parameters.\n- V2 type pipelines have the same structure as a V1 type, along with additional parameters for release safety and trigger configuration.\n\n> Including V2 parameters, such as triggers on Git tags, in the pipeline JSON when creating or updating a pipeline will result in the pipeline having the V2 type of pipeline and the associated costs. \n\nFor information about pricing for CodePipeline, see [Pricing](https://docs.aws.amazon.com/https://aws.amazon.com/codepipeline/pricing/) .\n\nFor information about which type of pipeline to choose, see [What type of pipeline is right for me?](https://docs.aws.amazon.com/codepipeline/latest/userguide/pipeline-types-planning.html) .", + "markdownDescription": "CodePipeline provides the following pipeline types, which differ in characteristics and price, so that you can tailor your pipeline features and cost to the needs of your applications.\n\n- V1 type pipelines have a JSON structure that contains standard pipeline, stage, and action-level parameters.\n- V2 type pipelines have the same structure as a V1 type, along with additional parameters for release safety and trigger configuration.\n\n> Including V2 parameters, such as triggers on Git tags, in the pipeline JSON when creating or updating a pipeline will result in the pipeline having the V2 type of pipeline and the associated costs. 
\n\nFor information about pricing for CodePipeline, see [Pricing](https://docs.aws.amazon.com/codepipeline/pricing/) .\n\nFor information about which type of pipeline to choose, see [What type of pipeline is right for me?](https://docs.aws.amazon.com/codepipeline/latest/userguide/pipeline-types-planning.html) .", "title": "PipelineType", "type": "string" }, @@ -40831,7 +40825,7 @@ "items": { "$ref": "#/definitions/AWS::CodePipeline::Pipeline.GitPushFilter" }, - "markdownDescription": "The field where the repository event that will start the pipeline, such as pushing Git tags, is specified with details.\n\n> Git tags is the only supported event type.", + "markdownDescription": "The field where the repository event that will start the pipeline, such as pushing Git tags, is specified with details.", "title": "Push", "type": "array" }, @@ -41598,7 +41592,7 @@ "additionalProperties": false, "properties": { "CreatedBy": { - "markdownDescription": "", + "markdownDescription": "The name or email alias of the person who created the notification rule.", "title": "CreatedBy", "type": "string" }, @@ -41608,7 +41602,7 @@ "type": "string" }, "EventTypeId": { - "markdownDescription": "", + "markdownDescription": "The event type associated with this notification rule. For a complete list of event types and IDs, see [Notification concepts](https://docs.aws.amazon.com/dtconsole/latest/userguide/concepts.html#concepts-api) in the *Developer Tools Console User Guide* .", "title": "EventTypeId", "type": "string" }, @@ -41647,7 +41641,7 @@ "type": "object" }, "TargetAddress": { - "markdownDescription": "", + "markdownDescription": "The Amazon Resource Name (ARN) of the Amazon SNS topic or AWS Chatbot client.", "title": "TargetAddress", "type": "string" }, @@ -41655,7 +41649,7 @@ "items": { "$ref": "#/definitions/AWS::CodeStarNotifications::NotificationRule.Target" }, - "markdownDescription": "A list of Amazon Resource Names (ARNs) of Amazon Simple Notification Service topics and AWS Chatbot clients to associate with the notification rule.", + "markdownDescription": "A list of Amazon Resource Names (ARNs) of Amazon SNS topics and AWS Chatbot clients to associate with the notification rule.", "title": "Targets", "type": "array" } @@ -41699,7 +41693,7 @@ "type": "string" }, "TargetType": { - "markdownDescription": "The target type. Can be an Amazon Simple Notification Service topic or AWS Chatbot client.\n\n- Amazon Simple Notification Service topics are specified as `SNS` .\n- AWS Chatbot clients are specified as `AWSChatbotSlack` .", + "markdownDescription": "The target type. Can be an Amazon Simple Notification Service topic or AWS Chatbot client.\n\n- Amazon Simple Notification Service topics are specified as `SNS` .\n- AWS Chatbot clients are specified as `AWSChatbotSlack` .\n- AWS Chatbot clients for Microsoft Teams are specified as `AWSChatbotMicrosoftTeams` .", "title": "TargetType", "type": "string" } @@ -42093,7 +42087,7 @@ "additionalProperties": false, "properties": { "AmbiguousRoleResolution": { - "markdownDescription": "Specifies the action to be taken if either no rules match the claim value for the Rules type, or there is no `cognito:preferred_role` claim and there are multiple `cognito:roles` matches for the Token type. 
If you specify Token or Rules as the Type, AmbiguousRoleResolution is required.\n\nValid values are `AuthenticatedRole` or `Deny` .", + "markdownDescription": "If you specify Token or Rules as the `Type` , `AmbiguousRoleResolution` is required.\n\nSpecifies the action to be taken if either no rules match the claim value for the `Rules` type, or there is no `cognito:preferred_role` claim and there are multiple `cognito:roles` matches for the `Token` type.", "title": "AmbiguousRoleResolution", "type": "string" }, @@ -42108,7 +42102,7 @@ "title": "RulesConfiguration" }, "Type": { - "markdownDescription": "The role-mapping type. `Token` uses `cognito:roles` and `cognito:preferred_role` claims from the Amazon Cognito identity provider token to map groups to roles. `Rules` attempts to match claims from the token to map to a role.\n\nValid values are `Token` or `Rules` .", + "markdownDescription": "The role mapping type. Token will use `cognito:roles` and `cognito:preferred_role` claims from the Cognito identity provider token to map groups to roles. Rules will attempt to match claims from the token to map to a role.", "title": "Type", "type": "string" } @@ -42304,7 +42298,7 @@ "type": "array" }, "DeletionProtection": { - "markdownDescription": "When active, `DeletionProtection` prevents accidental deletion of your user pool. Before you can delete a user pool that you have protected against deletion, you must deactivate this feature.\n\nWhen you try to delete a protected user pool in a `DeleteUserPool` API request, Amazon Cognito returns an `InvalidParameterException` error. To delete a protected user pool, send a new `DeleteUserPool` request after you deactivate deletion protection in an `UpdateUserPool` API request.", + "markdownDescription": "When active, `DeletionProtection` prevents accidental deletion of your user pool. Before you can delete a user pool that you have protected against deletion, you must deactivate this feature.\n\nWhen you try to delete a protected user pool in a `DeleteUserPool` API request, Amazon Cognito returns an `InvalidParameterException` error. To delete a protected user pool, send a new `DeleteUserPool` request after you deactivate deletion protection in an `UpdateUserPool` API request.", "title": "DeletionProtection", "type": "string" }, @@ -42636,7 +42630,7 @@ }, "PreTokenGenerationConfig": { "$ref": "#/definitions/AWS::Cognito::UserPool.PreTokenGenerationConfig", - "markdownDescription": "", + "markdownDescription": "The detailed configuration of a pre token generation trigger. If you also set an ARN in `PreTokenGeneration` , its value must be identical to `PreTokenGenerationConfig` .", "title": "PreTokenGenerationConfig" }, "UserMigration": { @@ -42719,12 +42713,12 @@ "additionalProperties": false, "properties": { "LambdaArn": { - "markdownDescription": "", + "markdownDescription": "The Amazon Resource Name (ARN) of the function that you want to assign to your Lambda trigger.\n\nThis parameter and the `PreTokenGeneration` property of `LambdaConfig` have the same value. For new instances of pre token generation triggers, set `LambdaArn` .", "title": "LambdaArn", "type": "string" }, "LambdaVersion": { - "markdownDescription": "", + "markdownDescription": "The user pool trigger version of the request that Amazon Cognito sends to your Lambda function. Higher-numbered versions add fields that support new features.", "title": "LambdaVersion", "type": "string" } }, @@ -43373,7 +43367,7 @@ "type": "array" }, "ProviderDetails": { - "markdownDescription": "The IdP details. The following list describes the provider detail keys for each IdP type.\n\n- For Google and Login with Amazon:\n\n- client_id\n- client_secret\n- authorize_scopes\n- For Facebook:\n\n- client_id\n- client_secret\n- authorize_scopes\n- api_version\n- For Sign in with Apple:\n\n- client_id\n- team_id\n- key_id\n- private_key\n- authorize_scopes\n- For OpenID Connect (OIDC) providers:\n\n- client_id\n- client_secret\n- attributes_request_method\n- oidc_issuer\n- authorize_scopes\n- The following keys are only present if Amazon Cognito didn't discover them at the `oidc_issuer` URL.\n\n- authorize_url\n- token_url\n- attributes_url\n- jwks_uri\n- Amazon Cognito sets the value of the following keys automatically. They are read-only.\n\n- attributes_url_add_attributes\n- For SAML providers:\n\n- MetadataFile or MetadataURL\n- IDPSignout *optional*", + "markdownDescription": "The scopes, URLs, and identifiers for your external identity provider. The following examples describe the provider detail keys for each IdP type. These values and their schema are subject to change. Social IdP `authorize_scopes` values must match the values listed here.\n\n- **OpenID Connect (OIDC)** - Amazon Cognito accepts the following elements when it can't discover endpoint URLs from `oidc_issuer` : `attributes_url` , `authorize_url` , `jwks_uri` , `token_url` .\n\nCreate or update request: `\"ProviderDetails\": { \"attributes_request_method\": \"GET\", \"attributes_url\": \"https://auth.example.com/userInfo\", \"authorize_scopes\": \"openid profile email\", \"authorize_url\": \"https://auth.example.com/authorize\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"jwks_uri\": \"https://auth.example.com/.well-known/jwks.json\", \"oidc_issuer\": \"https://auth.example.com\", \"token_url\": \"https://example.com/token\" }`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_request_method\": \"GET\", \"attributes_url\": \"https://auth.example.com/userInfo\", \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"openid profile email\", \"authorize_url\": \"https://auth.example.com/authorize\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"jwks_uri\": \"https://auth.example.com/.well-known/jwks.json\", \"oidc_issuer\": \"https://auth.example.com\", \"token_url\": \"https://example.com/token\" }`\n- **SAML** - Create or update request with Metadata URL: `\"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"MetadataURL\": \"https://auth.example.com/sso/saml/metadata\", \"RequestSigningAlgorithm\": \"rsa-sha256\" }`\n\nCreate or update request with Metadata file: `\"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"MetadataFile\": \"[metadata XML]\", \"RequestSigningAlgorithm\": \"rsa-sha256\" }`\n\nThe value of `MetadataFile` must be the plaintext metadata document with all quote (\") characters escaped by backslashes.\n\nDescribe response: `\"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"ActiveEncryptionCertificate\": \"[certificate]\", \"MetadataURL\": \"https://auth.example.com/sso/saml/metadata\", \"RequestSigningAlgorithm\": \"rsa-sha256\", \"SLORedirectBindingURI\": \"https://auth.example.com/slo/saml\", \"SSORedirectBindingURI\": \"https://auth.example.com/sso/saml\" }`\n- **LoginWithAmazon** - Create or update request: `\"ProviderDetails\": { \"authorize_scopes\": \"profile postal_code\", \"client_id\": \"amzn1.application-oa2-client.1example23456789\", \"client_secret\": \"provider-app-client-secret\" }`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_url\": \"https://api.amazon.com/user/profile\", \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"profile postal_code\", \"authorize_url\": \"https://www.amazon.com/ap/oa\", \"client_id\": \"amzn1.application-oa2-client.1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"token_request_method\": \"POST\", \"token_url\": \"https://api.amazon.com/auth/o2/token\" }`\n- **Google** - Create or update request: `\"ProviderDetails\": { \"authorize_scopes\": \"email profile openid\", \"client_id\": \"1example23456789.apps.googleusercontent.com\", \"client_secret\": \"provider-app-client-secret\" }`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_url\": \"https://people.googleapis.com/v1/people/me?personFields=\", \"attributes_url_add_attributes\": \"true\", \"authorize_scopes\": \"email profile openid\", \"authorize_url\": \"https://accounts.google.com/o/oauth2/v2/auth\", \"client_id\": \"1example23456789.apps.googleusercontent.com\", \"client_secret\": \"provider-app-client-secret\", \"oidc_issuer\": \"https://accounts.google.com\", \"token_request_method\": \"POST\", \"token_url\": \"https://www.googleapis.com/oauth2/v4/token\" }`\n- **SignInWithApple** - Create or update request: `\"ProviderDetails\": { \"authorize_scopes\": \"email name\", \"client_id\": \"com.example.cognito\", \"private_key\": \"1EXAMPLE\", \"key_id\": \"2EXAMPLE\", \"team_id\": \"3EXAMPLE\" }`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"email name\", \"authorize_url\": \"https://appleid.apple.com/auth/authorize\", \"client_id\": \"com.example.cognito\", \"key_id\": \"1EXAMPLE\", \"oidc_issuer\": \"https://appleid.apple.com\", \"team_id\": \"2EXAMPLE\", \"token_request_method\": \"POST\", \"token_url\": \"https://appleid.apple.com/auth/token\" }`\n- **Facebook** - Create or update request: `\"ProviderDetails\": { \"api_version\": \"v17.0\", \"authorize_scopes\": \"public_profile, email\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\" }`\n\nDescribe response: `\"ProviderDetails\": { \"api_version\": \"v17.0\", \"attributes_url\": \"https://graph.facebook.com/v17.0/me?fields=\", \"attributes_url_add_attributes\": \"true\", \"authorize_scopes\": \"public_profile, email\", \"authorize_url\": \"https://www.facebook.com/v17.0/dialog/oauth\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"token_request_method\": \"GET\", \"token_url\": \"https://graph.facebook.com/v17.0/oauth/access_token\" }`", "title": "ProviderDetails", "type": "object" }, @@ -43912,7 +43906,7 @@ "properties": { "ClientMetadata": { "additionalProperties": true, - "markdownDescription": "A map of custom key-value pairs that you can provide as input for the custom workflow that is invoked by the *pre sign-up* trigger.\n\nYou create custom workflows by assigning AWS Lambda functions to user pool triggers.
When you create a `UserPoolUser` resource and include the `ClientMetadata` property, Amazon Cognito invokes the function that is assigned to the *pre sign-up* trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a `clientMetadata` attribute, which provides the data that you assigned to the ClientMetadata property. In your function code in AWS Lambda , you can process the `clientMetadata` value to enhance your workflow for your specific needs.\n\nFor more information, see [Customizing User Pool Workflows with Lambda Triggers](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) in the *Amazon Cognito Developer Guide* .\n\n> Take the following limitations into consideration when you use the ClientMetadata parameter:\n> \n> - Amazon Cognito does not store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.\n> - Amazon Cognito does not validate the ClientMetadata value.\n> - Amazon Cognito does not encrypt the the ClientMetadata value, so don't use it to provide sensitive information.", + "markdownDescription": "A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.\n\nYou create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the AdminCreateUser API action, Amazon Cognito invokes the function that is assigned to the *pre sign-up* trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a `clientMetadata` attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminCreateUser request. In your function code in AWS Lambda , you can process the `clientMetadata` value to enhance your workflow for your specific needs.\n\nFor more information, see [Customizing user pool Workflows with Lambda Triggers](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) in the *Amazon Cognito Developer Guide* .\n\n> When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following:\n> \n> - Store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose.\n> - Validate the ClientMetadata value.\n> - Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive information.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -59362,7 +59356,7 @@ }, "TaskReportConfig": { "$ref": "#/definitions/AWS::DataSync::Task.TaskReportConfig", - "markdownDescription": "Specifies how you want to configure a task report, which provides detailed information about for your DataSync transfer.", + "markdownDescription": "Specifies how you want to configure a task report, which provides detailed information about your DataSync transfer. 
For more information, see [Monitoring your DataSync transfers with task reports](https://docs.aws.amazon.com/datasync/latest/userguide/task-reports.html) .\n\nWhen using this parameter, your caller identity (the role that you're using DataSync with) must have the `iam:PassRole` permission. The [AWSDataSyncFullAccess](https://docs.aws.amazon.com/datasync/latest/userguide/security-iam-awsmanpol.html#security-iam-awsmanpol-awsdatasyncfullaccess) policy includes this permission.", "title": "TaskReportConfig" } }, @@ -63719,7 +63713,7 @@ "title": "NetworkInterfaceCount" }, "OnDemandMaxPricePercentageOverLowestPrice": { - "markdownDescription": "The price protection threshold for On-Demand Instances. This is the maximum you\u2019ll pay for an On-Demand Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> If you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. \n\nDefault: `20`", + "markdownDescription": "[Price protection] The price protection threshold for On-Demand Instances, as a percentage higher than an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo indicate no price protection threshold, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> If you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. \n\nDefault: `20`", "title": "OnDemandMaxPricePercentageOverLowestPrice", "type": "number" }, @@ -63729,7 +63723,7 @@ "type": "boolean" }, "SpotMaxPricePercentageOverLowestPrice": { - "markdownDescription": "The price protection threshold for Spot Instance. This is the maximum you\u2019ll pay for an Spot Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. 
When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> If you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. \n\nDefault: `100`", + "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo indicate no price protection threshold, specify a high value, such as `999999` .\n\nIf you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, then `SpotMaxPricePercentageOverLowestPrice` is used and the value for that parameter defaults to `100` . \n\nDefault: `100`", "title": "SpotMaxPricePercentageOverLowestPrice", "type": "number" }, @@ -65909,7 +65903,7 @@ "type": "boolean" }, "AssociatePublicIpAddress": { - "markdownDescription": "Indicates whether to assign a public IPv4 address to an instance. Applies only if creating a network interface when launching an instance. The network interface must be the primary network interface. If launching into a default subnet, the default value is `true` .\n\nStarting on February 1, 2024, AWS will charge for all public IPv4 addresses, including public IPv4 addresses associated with running instances and Elastic IP addresses. For more information, see the *Public IPv4 Address* tab on the [VPC pricing page](https://docs.aws.amazon.com/vpc/pricing/) .", + "markdownDescription": "Indicates whether to assign a public IPv4 address to an instance. Applies only if creating a network interface when launching an instance. The network interface must be the primary network interface. 
If launching into a default subnet, the default value is `true` .\n\nAWS charges for all public IPv4 addresses, including public IPv4 addresses associated with running instances and Elastic IP addresses. For more information, see the *Public IPv4 Address* tab on the [VPC pricing page](https://docs.aws.amazon.com/vpc/pricing/) .", "title": "AssociatePublicIpAddress", "type": "boolean" }, @@ -66820,7 +66814,7 @@ "title": "NetworkInterfaceCount" }, "OnDemandMaxPricePercentageOverLowestPrice": { - "markdownDescription": "The price protection threshold for On-Demand Instances. This is the maximum you\u2019ll pay for an On-Demand Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> If you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. \n\nDefault: `20`", + "markdownDescription": "[Price protection] The price protection threshold for On-Demand Instances, as a percentage higher than an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> If you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. \n\nDefault: `20`", "title": "OnDemandMaxPricePercentageOverLowestPrice", "type": "number" }, @@ -66830,7 +66824,7 @@ "type": "boolean" }, "SpotMaxPricePercentageOverLowestPrice": { - "markdownDescription": "The price protection threshold for Spot Instances. This is the maximum you\u2019ll pay for a Spot Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. 
When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> If you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. \n\nDefault: `100`", + "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo indicate no price protection threshold, specify a high value, such as `999999` .\n\nIf you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, then `SpotMaxPricePercentageOverLowestPrice` is used and the value for that parameter defaults to `100` . \n\nDefault: `100`", "title": "SpotMaxPricePercentageOverLowestPrice", "type": "number" }, @@ -67226,7 +67220,7 @@ "type": "boolean" }, "AssociatePublicIpAddress": { - "markdownDescription": "Associates a public IPv4 address with eth0 for a new network interface.\n\nStarting on February 1, 2024, AWS will charge for all public IPv4 addresses, including public IPv4 addresses associated with running instances and Elastic IP addresses. For more information, see the *Public IPv4 Address* tab on the [Amazon VPC pricing page](https://docs.aws.amazon.com/vpc/pricing/) .", + "markdownDescription": "Associates a public IPv4 address with eth0 for a new network interface.\n\nAWS charges for all public IPv4 addresses, including public IPv4 addresses associated with running instances and Elastic IP addresses. 
For more information, see the *Public IPv4 Address* tab on the [Amazon VPC pricing page](https://docs.aws.amazon.com/vpc/pricing/) .", "title": "AssociatePublicIpAddress", "type": "boolean" }, @@ -71125,7 +71119,7 @@ "additionalProperties": false, "properties": { "AssociatePublicIpAddress": { - "markdownDescription": "Indicates whether to assign a public IPv4 address to an instance you launch in a VPC. The public IP address can only be assigned to a network interface for eth0, and can only be assigned to a new network interface, not an existing one. You cannot specify more than one network interface in the request. If launching into a default subnet, the default value is `true` .\n\nStarting on February 1, 2024, AWS will charge for all public IPv4 addresses, including public IPv4 addresses associated with running instances and Elastic IP addresses. For more information, see the *Public IPv4 Address* tab on the [Amazon VPC pricing page](https://docs.aws.amazon.com/vpc/pricing/) .", + "markdownDescription": "Indicates whether to assign a public IPv4 address to an instance you launch in a VPC. The public IP address can only be assigned to a network interface for eth0, and can only be assigned to a new network interface, not an existing one. You cannot specify more than one network interface in the request. If launching into a default subnet, the default value is `true` .\n\nAWS charges for all public IPv4 addresses, including public IPv4 addresses associated with running instances and Elastic IP addresses. For more information, see the *Public IPv4 Address* tab on the [Amazon VPC pricing page](https://docs.aws.amazon.com/vpc/pricing/) .", "title": "AssociatePublicIpAddress", "type": "boolean" }, @@ -71309,7 +71303,7 @@ "title": "NetworkInterfaceCount" }, "OnDemandMaxPricePercentageOverLowestPrice": { - "markdownDescription": "The price protection threshold for On-Demand Instances. This is the maximum you\u2019ll pay for an On-Demand Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> If you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. \n\nDefault: `20`", + "markdownDescription": "[Price protection] The price protection threshold for On-Demand Instances, as a percentage higher than an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. 
When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo indicate no price protection threshold, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> If you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. \n\nDefault: `20`", "title": "OnDemandMaxPricePercentageOverLowestPrice", "type": "number" }, @@ -71319,7 +71313,7 @@ "type": "boolean" }, "SpotMaxPricePercentageOverLowestPrice": { - "markdownDescription": "The price protection threshold for Spot Instance. This is the maximum you\u2019ll pay for an Spot Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> If you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. \n\nDefault: `100`", + "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo indicate no price protection threshold, specify a high value, such as `999999` .\n\nIf you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. 
If you don't specify either, then `SpotMaxPricePercentageOverLowestPrice` is used and the value for that parameter defaults to `100` . \n\nDefault: `100`", "title": "SpotMaxPricePercentageOverLowestPrice", "type": "number" }, @@ -71980,7 +71974,7 @@ "type": "number" }, "MapPublicIpOnLaunch": { - "markdownDescription": "Indicates whether instances launched in this subnet receive a public IPv4 address. The default value is `false` .\n\nStarting on February 1, 2024, AWS will charge for all public IPv4 addresses, including public IPv4 addresses associated with running instances and Elastic IP addresses. For more information, see the *Public IPv4 Address* tab on the [VPC pricing page](https://docs.aws.amazon.com/vpc/pricing/) .", + "markdownDescription": "Indicates whether instances launched in this subnet receive a public IPv4 address. The default value is `false` .\n\nAWS charges for all public IPv4 addresses, including public IPv4 addresses associated with running instances and Elastic IP addresses. For more information, see the *Public IPv4 Address* tab on the [VPC pricing page](https://docs.aws.amazon.com/vpc/pricing/) .", "title": "MapPublicIpOnLaunch", "type": "boolean" }, @@ -77458,7 +77452,7 @@ "additionalProperties": false, "properties": { "ContainerName": { - "markdownDescription": "The name of the container (as it appears in a container definition) to associate with the load balancer.", + "markdownDescription": "The name of the container (as it appears in a container definition) to associate with the load balancer.\n\nYou need to specify the container name when configuring the target group for an Amazon ECS load balancer.", "title": "ContainerName", "type": "string" }, @@ -78171,7 +78165,7 @@ "items": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.SystemControl" }, - "markdownDescription": "A list of namespaced kernel parameters to set in the container. This parameter maps to `Sysctls` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--sysctl` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . For example, you can configure `net.ipv4.tcp_keepalive_time` setting to maintain longer lived connections.\n\nWe don't recommend that you specify network-related `systemControls` parameters for multiple containers in a single task that also uses either the `awsvpc` or `host` network mode. Doing this has the following disadvantages:\n\n- For tasks that use the `awsvpc` network mode including Fargate, if you set `systemControls` for any container, it applies to all containers in the task. If you set different `systemControls` for multiple containers in a single task, the container that's started last determines which `systemControls` take effect.\n- For tasks that use the `host` network mode, the network namespace `systemControls` aren't supported.\n\nIf you're setting an IPC resource namespace to use for the containers in the task, the following conditions apply to your system controls. 
For more information, see [IPC mode](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#task_definition_ipcmode) .\n\n- For tasks that use the `host` IPC mode, IPC namespace `systemControls` aren't supported.\n- For tasks that use the `task` IPC mode, IPC namespace `systemControls` values apply to all containers within a task.\n\n> This parameter is not supported for Windows containers. > This parameter is only supported for tasks that are hosted on AWS Fargate if the tasks are using platform version `1.4.0` or later (Linux). This isn't supported for Windows containers on Fargate.", + "markdownDescription": "A list of namespaced kernel parameters to set in the container. This parameter maps to `Sysctls` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--sysctl` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . For example, you can configure `net.ipv4.tcp_keepalive_time` setting to maintain longer lived connections.", "title": "SystemControls", "type": "array" }, @@ -79009,7 +79003,7 @@ "additionalProperties": false, "properties": { "ContainerName": { - "markdownDescription": "The name of the container (as it appears in a container definition) to associate with the load balancer.", + "markdownDescription": "The name of the container (as it appears in a container definition) to associate with the load balancer.\n\nYou need to specify the container name when configuring the target group for an Amazon ECS load balancer.", "title": "ContainerName", "type": "string" }, @@ -79342,7 +79336,7 @@ "type": "array" }, "PerformanceMode": { - "markdownDescription": "The Performance mode of the file system. We recommend `generalPurpose` performance mode for all file systems. File systems using the `maxIO` performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. The performance mode can't be changed after the file system has been created. The `maxIO` mode is not supported on One Zone file systems.\n\n> Due to the higher per-operation latencies with Max I/O, we recommend using General Purpose performance mode for all file systems. \n\nDefault is `generalPurpose` .", + "markdownDescription": "The performance mode of the file system. We recommend `generalPurpose` performance mode for all file systems. File systems using the `maxIO` performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. The performance mode can't be changed after the file system has been created. The `maxIO` mode is not supported on One Zone file systems.\n\n> Due to the higher per-operation latencies with Max I/O, we recommend using General Purpose performance mode for all file systems. \n\nDefault is `generalPurpose` .", "title": "PerformanceMode", "type": "string" }, @@ -87510,7 +87504,7 @@ "items": { "$ref": "#/definitions/AWS::ElasticLoadBalancingV2::LoadBalancer.SubnetMapping" }, - "markdownDescription": "The IDs of the public subnets. You can specify only one subnet per Availability Zone. 
You must specify either subnets or subnet mappings, but not both.\n\n[Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets.\n\n[Application Load Balancers on Outposts] You must specify one Outpost subnet.\n\n[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.\n\n[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancer, you can specify one IPv6 address per subnet.\n\n[Gateway Load Balancers] You can specify subnets from one or more Availability Zones. You cannot specify Elastic IP addresses for your subnets.", + "markdownDescription": "The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both.\n\n[Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets.\n\n[Application Load Balancers on Outposts] You must specify one Outpost subnet.\n\n[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.\n\n[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancer, you can specify one IPv6 address per subnet.\n\n[Gateway Load Balancers] You can specify subnets from one or more Availability Zones. You cannot specify Elastic IP addresses for your subnets.", "title": "SubnetMappings", "type": "array" }, @@ -87518,7 +87512,7 @@ "items": { "type": "string" }, - "markdownDescription": "The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both. To specify an Elastic IP address, specify subnet mappings instead of subnets.\n\n[Application Load Balancers] You must specify subnets from at least two Availability Zones.\n\n[Application Load Balancers on Outposts] You must specify one Outpost subnet.\n\n[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.\n\n[Network Load Balancers] You can specify subnets from one or more Availability Zones.\n\n[Gateway Load Balancers] You can specify subnets from one or more Availability Zones.", + "markdownDescription": "The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both. 
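To make the subnets-versus-subnet-mappings distinction concrete, here is a minimal, hedged CloudFormation sketch (an illustrative aside, not part of the schema diff; the subnet and Elastic IP allocation IDs are placeholders) contrasting `Subnets` with `SubnetMappings` on an internet-facing Network Load Balancer:

```yaml
# Illustrative sketch only -- all IDs below are placeholders.
Resources:
  NetworkLB:
    Type: AWS::ElasticLoadBalancingV2::LoadBalancer
    Properties:
      Type: network
      Scheme: internet-facing
      # Option 1: plain subnet IDs (no per-subnet Elastic IP control).
      # Subnets:
      #   - subnet-0123456789abcdef0
      #   - subnet-0abcdef1234567890
      # Option 2: subnet mappings, one Elastic IP allocation per subnet.
      SubnetMappings:
        - SubnetId: subnet-0123456789abcdef0
          AllocationId: eipalloc-0123456789abcdef0
        - SubnetId: subnet-0abcdef1234567890
          AllocationId: eipalloc-0abcdef1234567890
```

Per the description above, specifying both `Subnets` and `SubnetMappings` on the same load balancer is rejected; use mappings when you need static addresses.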
To specify an Elastic IP address, specify subnet mappings instead of subnets.\n\n[Application Load Balancers] You must specify subnets from at least two Availability Zones.\n\n[Application Load Balancers on Outposts] You must specify one Outpost subnet.\n\n[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.\n\n[Network Load Balancers] You can specify subnets from one or more Availability Zones.\n\n[Gateway Load Balancers] You can specify subnets from one or more Availability Zones.", "title": "Subnets", "type": "array" }, @@ -87809,7 +87803,7 @@ "additionalProperties": false, "properties": { "Key": { - "markdownDescription": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deregistration_delay.timeout_seconds` - The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from `draining` to `unused` . The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.\n- `stickiness.enabled` - Indicates whether target stickiness is enabled. The value is `true` or `false` . The default is `false` .\n- `stickiness.type` - Indicates the type of stickiness. The possible values are:\n\n- `lb_cookie` and `app_cookie` for Application Load Balancers.\n- `source_ip` for Network Load Balancers.\n- `source_ip_dest_ip` and `source_ip_dest_ip_proto` for Gateway Load Balancers.\n\nThe following attributes are supported by Application Load Balancers and Network Load Balancers:\n\n- `load_balancing.cross_zone.enabled` - Indicates whether cross zone load balancing is enabled. The value is `true` , `false` or `use_load_balancer_configuration` . The default is `use_load_balancer_configuration` .\n- `target_group_health.dns_failover.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to the maximum number of targets. The default is `off` .\n- `target_group_health.dns_failover.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are 1 to the maximum number of targets. The default is 1.\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n\nThe following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:\n\n- `load_balancing.algorithm.type` - The load balancing algorithm determines how the load balancer selects targets when routing requests. 
The value is `round_robin` , `least_outstanding_requests` , or `weighted_random` . The default is `round_robin` .\n- `load_balancing.algorithm.anomaly_mitigation` - Only available when `load_balancing.algorithm.type` is `weighted_random` . Indicates whether anomaly mitigation is enabled. The value is `on` or `off` . The default is `off` .\n- `slow_start.duration_seconds` - The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). The default is 0 seconds (disabled).\n- `stickiness.app_cookie.cookie_name` - Indicates the name of the application-based cookie. Names that start with the following prefixes are not allowed: `AWSALB` , `AWSALBAPP` , and `AWSALBTG` ; they're reserved for use by the load balancer.\n- `stickiness.app_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the application-based cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n- `stickiness.lb_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n\nThe following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:\n\n- `lambda.multi_value_headers.enabled` - Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is `true` or `false` . The default is `false` . If the value is `false` and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.\n\nThe following attributes are supported only by Network Load Balancers:\n\n- `deregistration_delay.connection_termination.enabled` - Indicates whether the load balancer terminates connections at the end of the deregistration timeout. The value is `true` or `false` . For new UDP/TCP_UDP target groups the default is `true` . Otherwise, the default is `false` .\n- `preserve_client_ip.enabled` - Indicates whether client IP preservation is enabled. The value is `true` or `false` . The default is disabled if the target group type is IP address and the target group protocol is TCP or TLS. Otherwise, the default is enabled. Client IP preservation cannot be disabled for UDP and TCP_UDP target groups.\n- `proxy_protocol_v2.enabled` - Indicates whether Proxy Protocol version 2 is enabled. The value is `true` or `false` . The default is `false` .\n- `target_health_state.unhealthy.connection_termination.enabled` - Indicates whether the load balancer terminates connections to unhealthy targets. The value is `true` or `false` . The default is `true` .\n\nThe following attributes are supported only by Gateway Load Balancers:\n\n- `target_failover.on_deregistration` - Indicates how the Gateway Load Balancer handles existing flows when a target is deregistered. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . 
The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) can't be set independently. The value you set for both attributes must be the same.\n- `target_failover.on_unhealthy` - Indicates how the Gateway Load Balancer handles existing flows when a target is unhealthy. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) cannot be set independently. The value you set for both attributes must be the same.", + "markdownDescription": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deregistration_delay.timeout_seconds` - The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from `draining` to `unused` . The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.\n- `stickiness.enabled` - Indicates whether target stickiness is enabled. The value is `true` or `false` . The default is `false` .\n- `stickiness.type` - Indicates the type of stickiness. The possible values are:\n\n- `lb_cookie` and `app_cookie` for Application Load Balancers.\n- `source_ip` for Network Load Balancers.\n- `source_ip_dest_ip` and `source_ip_dest_ip_proto` for Gateway Load Balancers.\n\nThe following attributes are supported by Application Load Balancers and Network Load Balancers:\n\n- `load_balancing.cross_zone.enabled` - Indicates whether cross zone load balancing is enabled. The value is `true` , `false` or `use_load_balancer_configuration` . The default is `use_load_balancer_configuration` .\n- `target_group_health.dns_failover.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to the maximum number of targets. The default is `off` .\n- `target_group_health.dns_failover.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are 1 to the maximum number of targets. The default is 1.\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n\nThe following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:\n\n- `load_balancing.algorithm.type` - The load balancing algorithm determines how the load balancer selects targets when routing requests. The value is `round_robin` , `least_outstanding_requests` , or `weighted_random` . 
The default is `round_robin` .\n- `load_balancing.algorithm.anomaly_mitigation` - Only available when `load_balancing.algorithm.type` is `weighted_random` . Indicates whether anomaly mitigation is enabled. The value is `on` or `off` . The default is `off` .\n- `slow_start.duration_seconds` - The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). The default is 0 seconds (disabled).\n- `stickiness.app_cookie.cookie_name` - Indicates the name of the application-based cookie. Names that start with the following prefixes are not allowed: `AWSALB` , `AWSALBAPP` , and `AWSALBTG` ; they're reserved for use by the load balancer.\n- `stickiness.app_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the application-based cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n- `stickiness.lb_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n\nThe following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:\n\n- `lambda.multi_value_headers.enabled` - Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is `true` or `false` . The default is `false` . If the value is `false` and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.\n\nThe following attributes are supported only by Network Load Balancers:\n\n- `deregistration_delay.connection_termination.enabled` - Indicates whether the load balancer terminates connections at the end of the deregistration timeout. The value is `true` or `false` . For new UDP/TCP_UDP target groups the default is `true` . Otherwise, the default is `false` .\n- `preserve_client_ip.enabled` - Indicates whether client IP preservation is enabled. The value is `true` or `false` . The default is disabled if the target group type is IP address and the target group protocol is TCP or TLS. Otherwise, the default is enabled. Client IP preservation cannot be disabled for UDP and TCP_UDP target groups.\n- `proxy_protocol_v2.enabled` - Indicates whether Proxy Protocol version 2 is enabled. The value is `true` or `false` . The default is `false` .\n- `target_health_state.unhealthy.connection_termination.enabled` - Indicates whether the load balancer terminates connections to unhealthy targets. The value is `true` or `false` . The default is `true` .\n- `target_health_state.unhealthy.draining_interval_seconds` - The amount of time for Elastic Load Balancing to wait before changing the state of an unhealthy target from `unhealthy.draining` to `unhealthy` . The range is 0-360000 seconds. 
The default value is 0 seconds.\n\nNote: This attribute can only be configured when `target_health_state.unhealthy.connection_termination.enabled` is `false` .\n\nThe following attributes are supported only by Gateway Load Balancers:\n\n- `target_failover.on_deregistration` - Indicates how the Gateway Load Balancer handles existing flows when a target is deregistered. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) can't be set independently. The value you set for both attributes must be the same.\n- `target_failover.on_unhealthy` - Indicates how the Gateway Load Balancer handles existing flows when a target is unhealthy. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) cannot be set independently. The value you set for both attributes must be the same.", "title": "Key", "type": "string" }, @@ -92909,7 +92903,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .", + "markdownDescription": "A list of `Tag` values, with a maximum of 50 elements.", "title": "Tags", "type": "array" } @@ -93071,7 +93065,7 @@ "type": "array" }, "StorageCapacity": { - "markdownDescription": "Sets the storage capacity of the file system that you're creating.\n\n`StorageCapacity` is required if you are creating a new file system.\n\n*FSx for Lustre file systems* - The amount of storage capacity that you can configure depends on the value that you set for `StorageType` and the Lustre `DeploymentType` , as follows:\n\n- For `SCRATCH_2` , `PERSISTENT_2` and `PERSISTENT_1` deployment types using SSD storage type, the valid values are 1200 GiB, 2400 GiB, and increments of 2400 GiB.\n- For `PERSISTENT_1` HDD file systems, valid values are increments of 6000 GiB for 12 MB/s/TiB file systems and increments of 1800 GiB for 40 MB/s/TiB file systems.\n- For `SCRATCH_1` deployment type, valid values are 1200 GiB, 2400 GiB, and increments of 3600 GiB.\n\n*FSx for ONTAP file systems* - The amount of storage capacity that you can configure is from 1024 GiB up to 196,608 GiB (192 TiB).\n\n*FSx for OpenZFS file systems* - The amount of storage capacity that you can configure is from 64 GiB up to 524,288 GiB (512 TiB). If you are creating a file system from a backup, you can specify a storage capacity equal to or greater than the original file system's storage capacity.\n\n*FSx for Windows File Server file systems* - The amount of storage capacity that you can configure depends on the value that you set for `StorageType` as follows:\n\n- For SSD storage, valid values are 32 GiB-65,536 GiB (64 TiB).\n- For HDD storage, valid values are 2000 GiB-65,536 GiB (64 TiB).", + "markdownDescription": "Sets the storage capacity of the file system that you're creating.\n\n`StorageCapacity` is required if you are creating a new file system. 
It is not required if you are creating a file system by restoring a backup.\n\n*FSx for Lustre file systems* - The amount of storage capacity that you can configure depends on the value that you set for `StorageType` and the Lustre `DeploymentType` , as follows:\n\n- For `SCRATCH_2` , `PERSISTENT_2` and `PERSISTENT_1` deployment types using SSD storage type, the valid values are 1200 GiB, 2400 GiB, and increments of 2400 GiB.\n- For `PERSISTENT_1` HDD file systems, valid values are increments of 6000 GiB for 12 MB/s/TiB file systems and increments of 1800 GiB for 40 MB/s/TiB file systems.\n- For `SCRATCH_1` deployment type, valid values are 1200 GiB, 2400 GiB, and increments of 3600 GiB.\n\n*FSx for ONTAP file systems* - The amount of storage capacity that you can configure is from 1024 GiB up to 196,608 GiB (192 TiB).\n\n*FSx for OpenZFS file systems* - The amount of storage capacity that you can configure is from 64 GiB up to 524,288 GiB (512 TiB). If you are creating a file system from a backup, you can specify a storage capacity equal to or greater than the original file system's storage capacity.\n\n*FSx for Windows File Server file systems* - The amount of storage capacity that you can configure depends on the value that you set for `StorageType` as follows:\n\n- For SSD storage, valid values are 32 GiB-65,536 GiB (64 TiB).\n- For HDD storage, valid values are 2000 GiB-65,536 GiB (64 TiB).", "title": "StorageCapacity", "type": "number" }, @@ -93092,7 +93086,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .", + "markdownDescription": "The tags to associate with the file system. For more information, see [Tagging your Amazon FSx resources](https://docs.aws.amazon.com/fsx/latest/LustreGuide/tag-resources.html) in the *Amazon FSx for Lustre User Guide* .", "title": "Tags", "type": "array" }, @@ -93203,7 +93197,7 @@ "type": "number" }, "CopyTagsToBackups": { - "markdownDescription": "A Boolean flag indicating whether tags for the file system should be copied to backups. This value defaults to false. If it's set to true, all tags for the file system are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value. Only valid for use with `PERSISTENT_1` deployment types.", + "markdownDescription": "(Optional) Not available for use with file systems that are linked to a data repository. A boolean flag indicating whether tags for the file system should be copied to backups. The default value is false. If `CopyTagsToBackups` is set to true, all file system tags are copied to all automatic and user-initiated backups when the user doesn't specify any backup-specific tags. If `CopyTagsToBackups` is set to true and you specify one or more backup tags, only the specified tags are copied to backups. 
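As a hedged illustration of how `StorageCapacity` and `CopyTagsToBackups` compose in a template (a sketch only, not part of the schema diff; the subnet ID and tag values are placeholders, and `PERSISTENT_1` at 50 MB/s/TiB is just one valid combination):

```yaml
# Minimal sketch -- placeholder IDs and illustrative sizing.
Resources:
  LustreFileSystem:
    Type: AWS::FSx::FileSystem
    Properties:
      FileSystemType: LUSTRE
      StorageCapacity: 1200          # GiB; a valid value for PERSISTENT_1 SSD
      SubnetIds:
        - subnet-0123456789abcdef0
      LustreConfiguration:
        DeploymentType: PERSISTENT_1
        PerUnitStorageThroughput: 50 # MB/s/TiB
        CopyTagsToBackups: true      # file system tags flow to untagged backups
      Tags:
        - Key: Project
          Value: analytics
```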
If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.\n\n(Default = `false` )\n\nFor more information, see [Working with backups](https://docs.aws.amazon.com/fsx/latest/LustreGuide/using-backups-fsx.html) in the *Amazon FSx for Lustre User Guide* .", "title": "CopyTagsToBackups", "type": "boolean" }, @@ -93303,7 +93297,7 @@ "type": "string" }, "HAPairs": { - "markdownDescription": "", + "markdownDescription": "Specifies how many high-availability (HA) pairs of file servers will power your file system. Scale-up file systems are powered by 1 HA pair. The default value is 1. FSx for ONTAP scale-out file systems are powered by up to six HA pairs. The value of this property affects the values of `StorageCapacity` , `Iops` , and `ThroughputCapacity` . For more information, see [High-availability (HA) pairs](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/HA-pairs.html) in the *FSx for ONTAP User Guide* .\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `HAPairs` is less than 1 or greater than 6.\n- The value of `HAPairs` is greater than 1 and the value of `DeploymentType` is `SINGLE_AZ_1` or `MULTI_AZ_1` .", "title": "HAPairs", "type": "number" }, @@ -93326,7 +93320,7 @@ "type": "number" }, "ThroughputCapacityPerHAPair": { - "markdownDescription": "", + "markdownDescription": "Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.\n\nYou can define either the `ThroughputCapacityPerHAPair` or the `ThroughputCapacity` when creating a file system, but not both.\n\nThis field and `ThroughputCapacity` are the same for scale-up file systems powered by one HA pair.\n\n- For `SINGLE_AZ_1` and `MULTI_AZ_1` file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.\n- For `SINGLE_AZ_2` file systems, valid values are 3072 or 6144 MBps.\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `ThroughputCapacity` and `ThroughputCapacityPerHAPair` are not the same value for file systems with one HA pair.\n- The value of deployment type is `SINGLE_AZ_2` and `ThroughputCapacity` / `ThroughputCapacityPerHAPair` is a valid HA pair (a value between 2 and 6).\n- The value of `ThroughputCapacityPerHAPair` is not a valid value.", "title": "ThroughputCapacityPerHAPair", "type": "number" }, @@ -93639,7 +93633,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .", + "markdownDescription": "A list of `Tag` values, with a maximum of 50 elements.", "title": "Tags", "type": "array" }, @@ -93727,7 +93721,7 @@ "type": "string" }, "RootVolumeSecurityStyle": { - "markdownDescription": "The security style of the root volume of the SVM. 
Specify one of the following values:\n\n- `UNIX` if the file system is managed by a UNIX administrator, the majority of users are NFS clients, and an application accessing the data uses a UNIX user as the service account.\n- `NTFS` if the file system is managed by a Windows administrator, the majority of users are SMB clients, and an application accessing the data uses a Windows user as the service account.\n- `MIXED` if the file system is managed by both UNIX and Windows administrators and users consist of both NFS and SMB clients.", + "markdownDescription": "The security style of the root volume of the SVM. Specify one of the following values:\n\n- `UNIX` if the file system is managed by a UNIX administrator, the majority of users are NFS clients, and an application accessing the data uses a UNIX user as the service account.\n- `NTFS` if the file system is managed by a Microsoft Windows administrator, the majority of users are SMB clients, and an application accessing the data uses a Microsoft Windows user as the service account.\n- `MIXED` This is an advanced setting. For more information, see [Volume security style](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/volume-security-style.html) in the Amazon FSx for NetApp ONTAP User Guide.", "title": "RootVolumeSecurityStyle", "type": "string" }, @@ -93740,7 +93734,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .", + "markdownDescription": "A list of `Tag` values, with a maximum of 50 elements.", "title": "Tags", "type": "array" } @@ -93782,7 +93776,7 @@ }, "SelfManagedActiveDirectoryConfiguration": { "$ref": "#/definitions/AWS::FSx::StorageVirtualMachine.SelfManagedActiveDirectoryConfiguration", - "markdownDescription": "The configuration that Amazon FSx uses to join the ONTAP storage virtual machine (SVM) to your self-managed (including on-premises) Microsoft Active Directory (AD) directory.", + "markdownDescription": "The configuration that Amazon FSx uses to join the ONTAP storage virtual machine (SVM) to your self-managed (including on-premises) Microsoft Active Directory directory.", "title": "SelfManagedActiveDirectoryConfiguration" } }, @@ -109789,7 +109783,7 @@ }, "Tags": { "additionalProperties": true, - "markdownDescription": "The tags of the image.", + "markdownDescription": "The tags that apply to this image.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -110682,12 +110676,12 @@ "type": "string" }, "ExecutionRole": { - "markdownDescription": "The name or Amazon Resource Name (ARN) of the IAM role that Image Builder uses to run the lifecycle policy. 
This is a custom role that you create.", + "markdownDescription": "The name or Amazon Resource Name (ARN) for the IAM role you create that grants Image Builder access to run lifecycle actions.", "title": "ExecutionRole", "type": "string" }, "Name": { - "markdownDescription": "The name of the lifecycle policy.", + "markdownDescription": "The name of the lifecycle policy to create.", "title": "Name", "type": "string" }, @@ -110695,17 +110689,17 @@ "items": { "$ref": "#/definitions/AWS::ImageBuilder::LifecyclePolicy.PolicyDetail" }, - "markdownDescription": "The configuration details for a lifecycle policy resource.", + "markdownDescription": "Configuration details for the lifecycle policy rules.", "title": "PolicyDetails", "type": "array" }, "ResourceSelection": { "$ref": "#/definitions/AWS::ImageBuilder::LifecyclePolicy.ResourceSelection", - "markdownDescription": "Resource selection criteria used to run the lifecycle policy.", + "markdownDescription": "Selection criteria for the resources that the lifecycle policy applies to.", "title": "ResourceSelection" }, "ResourceType": { - "markdownDescription": "The type of resources the lifecycle policy targets.", + "markdownDescription": "The type of Image Builder resource that the lifecycle policy applies to.", "title": "ResourceType", "type": "string" }, @@ -110716,7 +110710,7 @@ }, "Tags": { "additionalProperties": true, - "markdownDescription": "To help manage your lifecycle policy resources, you can assign your own metadata to each resource in the form of tags. Each tag consists of a key and an optional value, both of which you define.", + "markdownDescription": "Tags to apply to the lifecycle policy resource.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" } }, @@ -110761,11 +110755,11 @@ "properties": { "IncludeResources": { "$ref": "#/definitions/AWS::ImageBuilder::LifecyclePolicy.IncludeResources", - "markdownDescription": "", + "markdownDescription": "Specifies the resources that the lifecycle policy applies to.", "title": "IncludeResources" }, "Type": { - "markdownDescription": "", + "markdownDescription": "Specifies the lifecycle action to take.", "title": "Type", "type": "string" } }, @@ -110779,20 +110773,20 @@ "additionalProperties": false, "properties": { "IsPublic": { - "markdownDescription": "", + "markdownDescription": "Configures whether public AMIs are excluded from the lifecycle action.", "title": "IsPublic", "type": "boolean" }, "LastLaunched": { "$ref": "#/definitions/AWS::ImageBuilder::LifecyclePolicy.LastLaunched", - "markdownDescription": "", + "markdownDescription": "Specifies configuration details for Image Builder to exclude the most recent resources from lifecycle actions.", "title": "LastLaunched" }, "Regions": { "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "Configures AWS Regions that are excluded from the lifecycle action.", "title": "Regions", "type": "array" }, @@ -110800,13 +110794,13 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "Specifies AWS accounts whose resources are excluded from the lifecycle action.", "title": "SharedAccounts", "type": "array" }, "TagMap": { "additionalProperties": true, - "markdownDescription": "", + "markdownDescription": "Lists tags that should be excluded from lifecycle actions for the AMIs that have them.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" } }, @@ -110823,12 +110817,12 @@ "properties": { "Amis": { "$ref": 
"#/definitions/AWS::ImageBuilder::LifecyclePolicy.AmiExclusionRules", - "markdownDescription": "", + "markdownDescription": "Lists configuration values that apply to AMIs that Image Builder should exclude from the lifecycle action.", "title": "Amis" }, "TagMap": { "additionalProperties": true, - "markdownDescription": "", + "markdownDescription": "Contains a list of tags that Image Builder uses to skip lifecycle actions for Image Builder image resources that have them.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -110844,22 +110838,22 @@ "additionalProperties": false, "properties": { "RetainAtLeast": { - "markdownDescription": "", + "markdownDescription": "For age-based filters, this is the number of resources to keep on hand after the lifecycle `DELETE` action is applied. Impacted resources are only deleted if you have more than this number of resources. If you have fewer resources than this number, the impacted resource is not deleted.", "title": "RetainAtLeast", "type": "number" }, "Type": { - "markdownDescription": "", + "markdownDescription": "Filter resources based on either `age` or `count` .", "title": "Type", "type": "string" }, "Unit": { - "markdownDescription": "", + "markdownDescription": "Defines the unit of time that the lifecycle policy uses to determine impacted resources. This is required for age-based rules.", "title": "Unit", "type": "string" }, "Value": { - "markdownDescription": "", + "markdownDescription": "The number of units for the time period or for the count. For example, a value of `6` might refer to six months or six AMIs.\n\n> For count-based filters, this value represents the minimum number of resources to keep on hand. If you have fewer resources than this number, the resource is excluded from lifecycle actions.", "title": "Value", "type": "number" } @@ -110874,17 +110868,17 @@ "additionalProperties": false, "properties": { "Amis": { - "markdownDescription": "", + "markdownDescription": "Specifies whether the lifecycle action should apply to distributed AMIs.", "title": "Amis", "type": "boolean" }, "Containers": { - "markdownDescription": "", + "markdownDescription": "Specifies whether the lifecycle action should apply to distributed containers.", "title": "Containers", "type": "boolean" }, "Snapshots": { - "markdownDescription": "", + "markdownDescription": "Specifies whether the lifecycle action should apply to snapshots associated with distributed AMIs.", "title": "Snapshots", "type": "boolean" } @@ -110895,12 +110889,12 @@ "additionalProperties": false, "properties": { "Unit": { - "markdownDescription": "", + "markdownDescription": "Defines the unit of time that the lifecycle policy uses to calculate elapsed time since the last instance launched from the AMI. For example: days, weeks, months, or years.", "title": "Unit", "type": "string" }, "Value": { - "markdownDescription": "", + "markdownDescription": "The integer number of units for the time period. 
For example `6` (months).", "title": "Value", "type": "number" } @@ -110916,17 +110910,17 @@ "properties": { "Action": { "$ref": "#/definitions/AWS::ImageBuilder::LifecyclePolicy.Action", - "markdownDescription": "", + "markdownDescription": "Configuration details for the policy action.", "title": "Action" }, "ExclusionRules": { "$ref": "#/definitions/AWS::ImageBuilder::LifecyclePolicy.ExclusionRules", - "markdownDescription": "", + "markdownDescription": "Additional rules to specify resources that should be exempt from policy actions.", "title": "ExclusionRules" }, "Filter": { "$ref": "#/definitions/AWS::ImageBuilder::LifecyclePolicy.Filter", - "markdownDescription": "", + "markdownDescription": "Specifies the resources that the lifecycle policy applies to.", "title": "Filter" } }, @@ -110940,12 +110934,12 @@ "additionalProperties": false, "properties": { "Name": { - "markdownDescription": "", + "markdownDescription": "The name of an Image Builder recipe that the lifecycle policy uses for resource selection.", "title": "Name", "type": "string" }, "SemanticVersion": { - "markdownDescription": "", + "markdownDescription": "The version of the Image Builder recipe specified by the `name` field.", "title": "SemanticVersion", "type": "string" } @@ -110962,13 +110956,13 @@ "items": { "$ref": "#/definitions/AWS::ImageBuilder::LifecyclePolicy.RecipeSelection" }, - "markdownDescription": "", + "markdownDescription": "A list of recipes that are used as selection criteria for the output images that the lifecycle policy applies to.", "title": "Recipes", "type": "array" }, "TagMap": { "additionalProperties": true, - "markdownDescription": "", + "markdownDescription": "A list of tags that are used as selection criteria for the Image Builder image resources that the lifecycle policy applies to.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -111021,28 +111015,28 @@ "type": "string" }, "Data": { - "markdownDescription": "Contains the YAML document content for the workflow.", + "markdownDescription": "Contains the UTF-8 encoded YAML document content for the workflow. Alternatively, you can specify the `uri` of a YAML document file stored in Amazon S3. However, you cannot specify both properties.", "title": "Data", "type": "string" }, "Description": { - "markdownDescription": "The description of the workflow.", + "markdownDescription": "Describes the workflow.", "title": "Description", "type": "string" }, "KmsKeyId": { - "markdownDescription": "The KMS key identifier used to encrypt the workflow resource.", + "markdownDescription": "The ID of the KMS key that is used to encrypt this workflow resource.", "title": "KmsKeyId", "type": "string" }, "Name": { - "markdownDescription": "The name of the workflow resource.", + "markdownDescription": "The name of the workflow to create.", "title": "Name", "type": "string" }, "Tags": { "additionalProperties": true, - "markdownDescription": "The tags that apply to the workflow resource", + "markdownDescription": "Tags that apply to the workflow resource.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -111052,17 +111046,17 @@ "type": "object" }, "Type": { - "markdownDescription": "Specifies the image creation stage that the workflow applies to. 
Image Builder currently supports build and test workflows.", + "markdownDescription": "The phase in the image build process for which the workflow resource is responsible.", "title": "Type", "type": "string" }, "Uri": { - "markdownDescription": "", + "markdownDescription": "The `uri` of a YAML component document file. This must be an S3 URL ( `s3://bucket/key` ), and the requester must have permission to access the S3 bucket it points to. If you use Amazon S3, you can specify component content up to your service quota.\n\nAlternatively, you can specify the YAML document inline, using the component `data` property. You cannot specify both properties.", "title": "Uri", "type": "string" }, "Version": { - "markdownDescription": "The workflow resource version. Workflow resources are immutable. To make a change, you can clone a workflow or create a new version.", + "markdownDescription": "The semantic version of this workflow resource. The semantic version syntax adheres to the following rules.\n\n> The semantic version has four nodes: `<major>.<minor>.<patch>/<build>` . You can assign values for the first three, and can filter on all of them.\n> \n> *Assignment:* For the first three nodes you can assign any positive integer value, including zero, with an upper limit of 2^30-1, or 1073741823 for each node. Image Builder automatically assigns the build number to the fourth node.\n> \n> *Patterns:* You can use any numeric pattern that adheres to the assignment requirements for the nodes that you can assign. For example, you might choose a software version pattern, such as 1.0.0, or a date, such as 2021.01.01.", "title": "Version", "type": "string" } @@ -124345,7 +124339,7 @@ }, "SidewalkResponse": { "$ref": "#/definitions/AWS::IoTWireless::PartnerAccount.SidewalkAccountInfoWithFingerprint", - "markdownDescription": "", + "markdownDescription": "Information about a Sidewalk account.", "title": "SidewalkResponse" }, "SidewalkUpdate": { @@ -124896,13 +124890,13 @@ "additionalProperties": false, "properties": { "DevAddr": { - "markdownDescription": "", + "markdownDescription": "The DevAddr value.", "title": "DevAddr", "type": "string" }, "SessionKeys": { "$ref": "#/definitions/AWS::IoTWireless::WirelessDevice.SessionKeysAbpV10x", - "markdownDescription": "", + "markdownDescription": "Session keys for ABP v1.0.x.", "title": "SessionKeys" } }, @@ -124937,7 +124931,7 @@ "properties": { "AbpV10x": { "$ref": "#/definitions/AWS::IoTWireless::WirelessDevice.AbpV10x", - "markdownDescription": "", + "markdownDescription": "ABP device object for LoRaWAN specification v1.0.x.", "title": "AbpV10x" }, "AbpV11": { @@ -124977,12 +124971,12 @@ "additionalProperties": false, "properties": { "AppEui": { - "markdownDescription": "", + "markdownDescription": "The AppEUI value. 
You specify this value when using LoRaWAN versions v1.0.2 or v1.0.3.", "title": "AppEui", "type": "string" }, "AppKey": { - "markdownDescription": "", + "markdownDescription": "The AppKey value.", "title": "AppKey", "type": "string" } @@ -125023,12 +125017,12 @@ "additionalProperties": false, "properties": { "AppSKey": { - "markdownDescription": "", + "markdownDescription": "The AppSKey value.", "title": "AppSKey", "type": "string" }, "NwkSKey": { - "markdownDescription": "", + "markdownDescription": "The NwkSKey value.", "title": "NwkSKey", "type": "string" } @@ -130782,16 +130776,16 @@ }, "RetryOptions": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.AmazonOpenSearchServerlessRetryOptions", - "markdownDescription": "The retry behavior in case Kinesis Data Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service. The default value is 300 (5 minutes).", + "markdownDescription": "The retry behavior in case Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service. The default value is 300 (5 minutes).", "title": "RetryOptions" }, "RoleARN": { - "markdownDescription": "The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Serverless offering for Amazon OpenSearch Service Configuration API and for indexing documents.", + "markdownDescription": "The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Serverless offering for Amazon OpenSearch Service Configuration API and for indexing documents.", "title": "RoleARN", "type": "string" }, "S3BackupMode": { - "markdownDescription": "Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Kinesis Data Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix.", + "markdownDescription": "Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix.", "title": "S3BackupMode", "type": "string" }, @@ -130817,7 +130811,7 @@ "additionalProperties": false, "properties": { "DurationInSeconds": { - "markdownDescription": "After an initial failure to deliver to the Serverless offering for Amazon OpenSearch Service, the total amount of time during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.", + "markdownDescription": "After an initial failure to deliver to the Serverless offering for Amazon OpenSearch Service, the total amount of time during which Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). 
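For orientation, a hedged fragment of a delivery stream's Serverless OpenSearch destination showing where `RetryOptions` and `S3BackupMode` sit (illustrative aside, not part of the schema diff; the ARNs, endpoint, and bucket are placeholders, and other required properties are elided):

```yaml
# Fragment only -- placeholder ARNs/endpoint; not a complete template.
AmazonOpenSearchServerlessDestinationConfiguration:
  CollectionEndpoint: https://abc123.us-east-1.aoss.amazonaws.com
  IndexName: app-logs
  RoleARN: arn:aws:iam::111122223333:role/FirehoseDeliveryRole
  RetryOptions:
    DurationInSeconds: 300          # total retry window, including first attempt
  S3BackupMode: FailedDocumentsOnly # back up only documents that failed to index
  S3Configuration:
    BucketARN: arn:aws:s3:::my-backup-bucket
    RoleARN: arn:aws:iam::111122223333:role/FirehoseDeliveryRole
```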
A value of 0 (zero) results in no retries.", "title": "DurationInSeconds", "type": "number" } @@ -130860,7 +130854,7 @@ }, "DocumentIdOptions": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.DocumentIdOptions", - "markdownDescription": "Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.", + "markdownDescription": "Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.", "title": "DocumentIdOptions" }, "DomainARN": { @@ -131023,12 +131017,12 @@ }, "InputFormatConfiguration": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.InputFormatConfiguration", - "markdownDescription": "Specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. This parameter is required if `Enabled` is set to true.", + "markdownDescription": "Specifies the deserializer that you want Firehose to use to convert the format of your data from JSON. This parameter is required if `Enabled` is set to true.", "title": "InputFormatConfiguration" }, "OutputFormatConfiguration": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.OutputFormatConfiguration", - "markdownDescription": "Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. This parameter is required if `Enabled` is set to true.", + "markdownDescription": "Specifies the serializer that you want Firehose to use to convert the format of your data to the Parquet or ORC format. This parameter is required if `Enabled` is set to true.", "title": "OutputFormatConfiguration" }, "SchemaConfiguration": { @@ -131043,7 +131037,7 @@ "additionalProperties": false, "properties": { "KeyARN": { - "markdownDescription": "If you set `KeyType` to `CUSTOMER_MANAGED_CMK` , you must specify the Amazon Resource Name (ARN) of the CMK. If you set `KeyType` to `AWS _OWNED_CMK` , Kinesis Data Firehose uses a service-account CMK.", + "markdownDescription": "If you set `KeyType` to `CUSTOMER_MANAGED_CMK` , you must specify the Amazon Resource Name (ARN) of the CMK. If you set `KeyType` to `AWS_OWNED_CMK` , Firehose uses a service-account CMK.", "title": "KeyARN", "type": "string" }, @@ -131063,12 +131057,12 @@ "properties": { "HiveJsonSerDe": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.HiveJsonSerDe", - "markdownDescription": "The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.", + "markdownDescription": "The native Hive / HCatalog JsonSerDe. Used by Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.", "title": "HiveJsonSerDe" }, "OpenXJsonSerDe": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.OpenXJsonSerDe", - "markdownDescription": "The OpenX SerDe. 
Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.", + "markdownDescription": "The OpenX SerDe. Used by Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.", "title": "OpenXJsonSerDe" } }, @@ -131078,7 +131072,7 @@ "additionalProperties": false, "properties": { "DefaultDocumentIdFormat": { - "markdownDescription": "When the `FIREHOSE_DEFAULT` option is chosen, Kinesis Data Firehose generates a unique document ID for each record based on a unique internal identifier. The generated document ID is stable across multiple delivery attempts, which helps prevent the same record from being indexed multiple times with different document IDs.\n\nWhen the `NO_DOCUMENT_ID` option is chosen, Kinesis Data Firehose does not include any document IDs in the requests it sends to the Amazon OpenSearch Service. This causes the Amazon OpenSearch Service domain to generate document IDs. In case of multiple delivery attempts, this may cause the same record to be indexed more than once with different document IDs. This option enables write-heavy operations, such as the ingestion of logs and observability data, to consume less resources in the Amazon OpenSearch Service domain, resulting in improved performance.", + "markdownDescription": "When the `FIREHOSE_DEFAULT` option is chosen, Firehose generates a unique document ID for each record based on a unique internal identifier. The generated document ID is stable across multiple delivery attempts, which helps prevent the same record from being indexed multiple times with different document IDs.\n\nWhen the `NO_DOCUMENT_ID` option is chosen, Firehose does not include any document IDs in the requests it sends to the Amazon OpenSearch Service. This causes the Amazon OpenSearch Service domain to generate document IDs. In case of multiple delivery attempts, this may cause the same record to be indexed more than once with different document IDs. This option enables write-heavy operations, such as the ingestion of logs and observability data, to consume less resources in the Amazon OpenSearch Service domain, resulting in improved performance.", "title": "DefaultDocumentIdFormat", "type": "string" } @@ -131140,7 +131134,7 @@ }, "DocumentIdOptions": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.DocumentIdOptions", - "markdownDescription": "Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.", + "markdownDescription": "Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.", "title": "DocumentIdOptions" }, "DomainARN": { @@ -131310,7 +131304,7 @@ "items": { "type": "string" }, - "markdownDescription": "Indicates how you want Kinesis Data Firehose to parse the date and timestamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. 
For more information, see [Class DateTimeFormat](https://docs.aws.amazon.com/https://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html) . You can also use the special value `millis` to parse timestamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses `java.sql.Timestamp::valueOf` by default.", + "markdownDescription": "Indicates how you want Firehose to parse the date and timestamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see [Class DateTimeFormat](https://docs.aws.amazon.com/https://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html) . You can also use the special value `millis` to parse timestamps in epoch milliseconds. If you don't specify a format, Firehose uses `java.sql.Timestamp::valueOf` by default.", "title": "TimestampFormats", "type": "array" } @@ -131510,7 +131504,7 @@ "additionalProperties": false, "properties": { "CaseInsensitive": { - "markdownDescription": "When set to `true` , which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.", + "markdownDescription": "When set to `true` , which is the default, Firehose converts JSON keys to lowercase before deserializing them.", "title": "CaseInsensitive", "type": "boolean" }, @@ -131526,7 +131520,7 @@ "type": "object" }, "ConvertDotsInJsonKeysToUnderscores": { - "markdownDescription": "When set to `true` , specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is \"a.b\", you can define the column name to be \"a_b\" when using this option.\n\nThe default is `false` .", + "markdownDescription": "When set to `true` , specifies that the names of the keys include dots and that you want Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is \"a.b\", you can define the column name to be \"a_b\" when using this option.\n\nThe default is `false` .", "title": "ConvertDotsInJsonKeysToUnderscores", "type": "boolean" } @@ -131537,7 +131531,7 @@ "additionalProperties": false, "properties": { "BlockSizeBytes": { - "markdownDescription": "The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.", + "markdownDescription": "The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Firehose uses this value for padding calculations.", "title": "BlockSizeBytes", "type": "number" }, @@ -131545,7 +131539,7 @@ "items": { "type": "string" }, - "markdownDescription": "The column names for which you want Kinesis Data Firehose to create bloom filters. The default is `null` .", + "markdownDescription": "The column names for which you want Firehose to create bloom filters. 
The default is `null` .", "title": "BloomFilterColumns", "type": "array" }, @@ -131607,7 +131601,7 @@ "additionalProperties": false, "properties": { "BlockSizeBytes": { - "markdownDescription": "The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.", + "markdownDescription": "The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Firehose uses this value for padding calculations.", "title": "BlockSizeBytes", "type": "number" }, @@ -131730,7 +131724,7 @@ }, "RetryOptions": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.RedshiftRetryOptions", - "markdownDescription": "The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).", + "markdownDescription": "The retry behavior in case Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).", "title": "RetryOptions" }, "RoleARN": { @@ -131773,7 +131767,7 @@ "additionalProperties": false, "properties": { "DurationInSeconds": { - "markdownDescription": "The length of time during which Kinesis Data Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Kinesis Data Firehose does not retry if the value of `DurationInSeconds` is 0 (zero) or if the first delivery attempt takes longer than the current value.", + "markdownDescription": "The length of time during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Firehose does not retry if the value of `DurationInSeconds` is 0 (zero) or if the first delivery attempt takes longer than the current value.", "title": "DurationInSeconds", "type": "number" } @@ -131860,7 +131854,7 @@ "type": "string" }, "RoleARN": { - "markdownDescription": "The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed.\n\n> If the `SchemaConfiguration` request parameter is used as part of invoking the `CreateDeliveryStream` API, then the `RoleARN` property is required and its value must be specified.", + "markdownDescription": "The role that Firehose can use to access AWS Glue. This role must be in the same account you use for Firehose. Cross-account roles aren't allowed.\n\n> If the `SchemaConfiguration` request parameter is used as part of invoking the `CreateDeliveryStream` API, then the `RoleARN` property is required and its value must be specified.", "title": "RoleARN", "type": "string" }, @@ -131870,7 +131864,7 @@ "type": "string" }, "VersionId": { - "markdownDescription": "Specifies the table version for the output data schema. If you don't specify this version ID, or if you set it to `LATEST` , Kinesis Data Firehose uses the most recent version. This means that any updates to the table are automatically picked up.", + "markdownDescription": "Specifies the table version for the output data schema. If you don't specify this version ID, or if you set it to `LATEST` , Firehose uses the most recent version. 
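As a sketch of how the ORC serializer and schema options documented above fit together, the following hypothetical `DataFormatConversionConfiguration` fragment uses placeholder database, table, and role names:

```json
{
  "OutputFormatConfiguration": {
    "Serializer": {
      "OrcSerDe": {
        "BlockSizeBytes": 268435456,
        "BloomFilterColumns": ["customer_id"]
      }
    }
  },
  "SchemaConfiguration": {
    "DatabaseName": "my_glue_db",
    "TableName": "my_glue_table",
    "RoleARN": { "Fn::GetAtt": ["FirehoseGlueRole", "Arn"] },
    "VersionId": "LATEST"
  }
}
```

Here `268435456` bytes is the 256 MiB default noted above, and `LATEST` picks up table updates automatically, per the `VersionId` description.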
This means that any updates to the table are automatically picked up.", "title": "VersionId", "type": "string" } @@ -131923,12 +131917,12 @@ "title": "CloudWatchLoggingOptions" }, "HECAcknowledgmentTimeoutInSeconds": { - "markdownDescription": "The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.", + "markdownDescription": "The amount of time that Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Firehose either tries to send the data again or considers it an error, based on your retry settings.", "title": "HECAcknowledgmentTimeoutInSeconds", "type": "number" }, "HECEndpoint": { - "markdownDescription": "The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.", + "markdownDescription": "The HTTP Event Collector (HEC) endpoint to which Firehose sends your data.", "title": "HECEndpoint", "type": "string" }, @@ -131949,11 +131943,11 @@ }, "RetryOptions": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.SplunkRetryOptions", - "markdownDescription": "The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk, or if it doesn't receive an acknowledgment of receipt from Splunk.", + "markdownDescription": "The retry behavior in case Firehose is unable to deliver data to Splunk, or if it doesn't receive an acknowledgment of receipt from Splunk.", "title": "RetryOptions" }, "S3BackupMode": { - "markdownDescription": "Defines how documents should be delivered to Amazon S3. When set to `FailedEventsOnly` , Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to `AllEvents` , Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. The default value is `FailedEventsOnly` .\n\nYou can update this backup mode from `FailedEventsOnly` to `AllEvents` . You can't update it from `AllEvents` to `FailedEventsOnly` .", + "markdownDescription": "Defines how documents should be delivered to Amazon S3. When set to `FailedEventsOnly` , Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to `AllEvents` , Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. The default value is `FailedEventsOnly` .\n\nYou can update this backup mode from `FailedEventsOnly` to `AllEvents` . You can't update it from `AllEvents` to `FailedEventsOnly` .", "title": "S3BackupMode", "type": "string" }, @@ -131975,7 +131969,7 @@ "additionalProperties": false, "properties": { "DurationInSeconds": { - "markdownDescription": "The total amount of time that Kinesis Data Firehose spends on retries. This duration starts after the initial attempt to send data to Splunk fails. It doesn't include the periods during which Kinesis Data Firehose waits for acknowledgment from Splunk after each attempt.", + "markdownDescription": "The total amount of time that Firehose spends on retries. This duration starts after the initial attempt to send data to Splunk fails. 
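For reference, a minimal, hypothetical `SplunkDestinationConfiguration` exercising the HEC and retry settings described above; the endpoint and secret reference are placeholders, and required siblings such as `S3Configuration` are omitted for brevity:

```json
{
  "SplunkDestinationConfiguration": {
    "HECEndpoint": "https://splunk.example.com:8088",
    "HECEndpointType": "Raw",
    "HECToken": "{{resolve:secretsmanager:SplunkHecToken}}",
    "HECAcknowledgmentTimeoutInSeconds": 180,
    "RetryOptions": { "DurationInSeconds": 3600 },
    "S3BackupMode": "FailedEventsOnly"
  }
}
```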
It doesn't include the periods during which Firehose waits for acknowledgment from Splunk after each attempt.", "title": "DurationInSeconds", "type": "number" } @@ -133853,7 +133847,7 @@ "additionalProperties": false, "properties": { "Destination": { - "markdownDescription": "The Amazon Resource Name (ARN) of the destination resource.\n\nTo retain records of [asynchronous invocations](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) , you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.\n\nTo retain records of failed invocations from [Kinesis and DynamoDB event sources](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html#event-source-mapping-destinations) , you can configure an Amazon SNS topic or Amazon SQS queue as the destination.\n\nTo retain records of failed invocations from [self-managed Kafka](https://docs.aws.amazon.com/lambda/latest/dg/with-kafka.html#services-smaa-onfailure-destination) or [Amazon MSK](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-onfailure-destination) , you can configure an Amazon SNS topic or Amazon SQS queue as the destination.", + "markdownDescription": "The Amazon Resource Name (ARN) of the destination resource.\n\nTo retain records of [asynchronous invocations](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) , you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.\n\nTo retain records of failed invocations from [Kinesis and DynamoDB event sources](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html#event-source-mapping-destinations) , you can configure an Amazon SNS topic or Amazon SQS queue as the destination.\n\nTo retain records of failed invocations from [self-managed Kafka](https://docs.aws.amazon.com/lambda/latest/dg/with-kafka.html#services-smaa-onfailure-destination) or [Amazon MSK](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-onfailure-destination) , you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination.", "title": "Destination", "type": "string" } @@ -133929,7 +133923,7 @@ }, "DestinationConfig": { "$ref": "#/definitions/AWS::Lambda::EventSourceMapping.DestinationConfig", - "markdownDescription": "(Kinesis and DynamoDB Streams only) An Amazon SQS queue or Amazon SNS topic destination for discarded records.", + "markdownDescription": "(Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Apache Kafka event sources only) A configuration object that specifies the destination of an event after Lambda processes it.", "title": "DestinationConfig" }, "DocumentDBEventSourceConfig": { @@ -134152,7 +134146,7 @@ "additionalProperties": false, "properties": { "Destination": { - "markdownDescription": "The Amazon Resource Name (ARN) of the destination resource.\n\nTo retain records of [asynchronous invocations](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) , you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.\n\nTo retain records of failed invocations from [Kinesis and DynamoDB event sources](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html#event-source-mapping-destinations) , you can configure an Amazon SNS topic 
or Amazon SQS queue as the destination.\n\nTo retain records of failed invocations from [self-managed Kafka](https://docs.aws.amazon.com/lambda/latest/dg/with-kafka.html#services-smaa-onfailure-destination) or [Amazon MSK](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-onfailure-destination) , you can configure an Amazon SNS topic or Amazon SQS queue as the destination.", + "markdownDescription": "The Amazon Resource Name (ARN) of the destination resource.\n\nTo retain records of [asynchronous invocations](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) , you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.\n\nTo retain records of failed invocations from [Kinesis and DynamoDB event sources](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html#event-source-mapping-destinations) , you can configure an Amazon SNS topic or Amazon SQS queue as the destination.\n\nTo retain records of failed invocations from [self-managed Kafka](https://docs.aws.amazon.com/lambda/latest/dg/with-kafka.html#services-smaa-onfailure-destination) or [Amazon MSK](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-onfailure-destination) , you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination.", "title": "Destination", "type": "string" } @@ -141726,7 +141720,7 @@ "type": "array" }, "Name": { - "markdownDescription": "A name for the query definition.", + "markdownDescription": "A name for the query definition.\n\n> You can use the name to create a folder structure for your queries. To create a folder, use a forward slash (/) to prefix your desired query name with your desired folder name. For example, `/ *folder-name* / *query-name*` .", "title": "Name", "type": "string" }, @@ -161786,12 +161780,12 @@ "properties": { "BufferOptions": { "$ref": "#/definitions/AWS::OSIS::Pipeline.BufferOptions", - "markdownDescription": "Options that specify the configuration of a persistent buffer. To configure how OpenSearch Ingestion encrypts this data, set the EncryptionAtRestOptions.", + "markdownDescription": "Options that specify the configuration of a persistent buffer. To configure how OpenSearch Ingestion encrypts this data, set the `EncryptionAtRestOptions` . For more information, see [Persistent buffering](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/osis-features-overview.html#persistent-buffering) .", "title": "BufferOptions" }, "EncryptionAtRestOptions": { "$ref": "#/definitions/AWS::OSIS::Pipeline.EncryptionAtRestOptions", - "markdownDescription": "Options to control how OpenSearch encrypts all data-at-rest.", + "markdownDescription": "Options to control how OpenSearch encrypts buffer data.", "title": "EncryptionAtRestOptions" }, "LogPublishingOptions": { @@ -161880,7 +161874,7 @@ "additionalProperties": false, "properties": { "LogGroup": { - "markdownDescription": "The name of the CloudWatch Logs group to send pipeline logs to. You can specify an existing log group or create a new one. For example, `/aws/OpenSearchService/IngestionService/my-pipeline` .", + "markdownDescription": "The name of the CloudWatch Logs group to send pipeline logs to. You can specify an existing log group or create a new one. 
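As a usage sketch for the `DestinationConfig` and `OnFailure` destination properties described above, here is a hypothetical `AWS::Lambda::EventSourceMapping` that sends discarded records to an SQS queue; all logical IDs are illustrative:

```json
{
  "OrdersEventSourceMapping": {
    "Type": "AWS::Lambda::EventSourceMapping",
    "Properties": {
      "FunctionName": { "Ref": "ConsumerFunction" },
      "EventSourceArn": { "Fn::GetAtt": ["OrdersStream", "Arn"] },
      "StartingPosition": "LATEST",
      "DestinationConfig": {
        "OnFailure": {
          "Destination": { "Fn::GetAtt": ["FailedRecordsQueue", "Arn"] }
        }
      }
    }
  }
}
```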
For example, `/aws/vendedlogs/OpenSearchService/pipelines` .", "title": "LogGroup", "type": "string" } @@ -161894,7 +161888,7 @@ "additionalProperties": false, "properties": { "KmsKeyArn": { - "markdownDescription": "The ARN of the KMS key used to encrypt data-at-rest in OpenSearch Ingestion. By default, data is encrypted using an AWS owned key.", + "markdownDescription": "The ARN of the KMS key used to encrypt buffer data. By default, data is encrypted using an AWS owned key.", "title": "KmsKeyArn", "type": "string" } @@ -163020,7 +163014,7 @@ "type": "string" }, "StandbyReplicas": { - "markdownDescription": "Details about an OpenSearch Serverless collection.", + "markdownDescription": "Indicates whether standby replicas should be used for a collection.", "title": "StandbyReplicas", "type": "string" }, @@ -170341,7 +170335,7 @@ "type": "string" }, "DestinationStreamArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the Amazon Kinesis data stream or Amazon Kinesis Data Firehose delivery stream that you want to publish event data to.\n\nFor a Kinesis data stream, the ARN format is: `arn:aws:kinesis: region : account-id :stream/ stream_name`\n\nFor a Kinesis Data Firehose delivery stream, the ARN format is: `arn:aws:firehose: region : account-id :deliverystream/ stream_name`", + "markdownDescription": "The Amazon Resource Name (ARN) of the Amazon Kinesis Data Stream or Amazon Kinesis Data Firehose delivery stream that you want to publish event data to.\n\nFor a Kinesis Data Stream, the ARN format is: `arn:aws:kinesis: region : account-id :stream/ stream_name`\n\nFor a Kinesis Data Firehose delivery stream, the ARN format is: `arn:aws:firehose: region : account-id :deliverystream/ stream_name`", "title": "DestinationStreamArn", "type": "string" }, @@ -214844,12 +214838,12 @@ }, "ScalingConfiguration": { "$ref": "#/definitions/AWS::RDS::DBCluster.ScalingConfiguration", - "markdownDescription": "The `ScalingConfiguration` property type specifies the scaling configuration of an Aurora Serverless DB cluster.\n\nThis property is only supported for Aurora Serverless v1. For Aurora Serverless v2, use `ServerlessV2ScalingConfiguration` property.\n\nValid for: Aurora DB clusters only", + "markdownDescription": "The `ScalingConfiguration` property type specifies the scaling configuration of an Aurora Serverless DB cluster.\n\nThis property is only supported for Aurora Serverless v1. For Aurora Serverless v2, use the `ServerlessV2ScalingConfiguration` property.\n\nValid for: Aurora DB clusters only", "title": "ScalingConfiguration" }, "ServerlessV2ScalingConfiguration": { "$ref": "#/definitions/AWS::RDS::DBCluster.ServerlessV2ScalingConfiguration", - "markdownDescription": "The `ServerlessV2ScalingConfiguration` property type specifies the scaling configuration of an Aurora Serverless V2 DB cluster.\n\nThis property is only supported for Aurora Serverless v2. For Aurora Serverless v1, use `ScalingConfiguration` property.\n\nValid for: Aurora DB clusters only", + "markdownDescription": "The `ServerlessV2ScalingConfiguration` property type specifies the scaling configuration of an Aurora Serverless V2 DB cluster.\n\nThis property is only supported for Aurora Serverless v2.
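A minimal sketch of the `ServerlessV2ScalingConfiguration` usage described above, assuming an Aurora PostgreSQL cluster; required properties such as credentials and an accompanying `db.serverless` DB instance are omitted, and the capacity values are illustrative:

```json
{
  "AuroraCluster": {
    "Type": "AWS::RDS::DBCluster",
    "Properties": {
      "Engine": "aurora-postgresql",
      "ServerlessV2ScalingConfiguration": {
        "MinCapacity": 0.5,
        "MaxCapacity": 16
      }
    }
  }
}
```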
For Aurora Serverless v1, use the `ScalingConfiguration` property.\n\nValid for: Aurora DB clusters only", "title": "ServerlessV2ScalingConfiguration" }, "SnapshotIdentifier": { @@ -233951,7 +233945,7 @@ "properties": { "FileSystemConfig": { "$ref": "#/definitions/AWS::SageMaker::AppImageConfig.FileSystemConfig", - "markdownDescription": "The Amazon Elastic File System (EFS) storage configuration for a SageMaker image.", + "markdownDescription": "The Amazon Elastic File System storage configuration for a SageMaker image.", "title": "FileSystemConfig" }, "KernelSpecs": { @@ -234878,7 +234872,7 @@ }, "DefaultSpaceSettings": { "$ref": "#/definitions/AWS::SageMaker::Domain.DefaultSpaceSettings", - "markdownDescription": "A collection of settings that apply to spaces created in the Domain.", + "markdownDescription": "A collection of settings that apply to spaces created in the domain.", "title": "DefaultSpaceSettings" }, "DefaultUserSettings": { @@ -234958,7 +234952,7 @@ "properties": { "DefaultResourceSpec": { "$ref": "#/definitions/AWS::SageMaker::Domain.ResourceSpec", - "markdownDescription": "", + "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the Code Editor app.", "title": "DefaultResourceSpec" }, "LifecycleConfigArns": { @@ -235084,7 +235078,7 @@ "items": { "type": "string" }, - "markdownDescription": "The security group IDs for the Amazon Virtual Private Cloud that the space uses for communication.", + "markdownDescription": "The security group IDs for the Amazon VPC that the space uses for communication.", "title": "SecurityGroups", "type": "array" } @@ -235164,7 +235158,7 @@ }, "DefaultResourceSpec": { "$ref": "#/definitions/AWS::SageMaker::Domain.ResourceSpec", - "markdownDescription": "", + "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterLab app.", "title": "DefaultResourceSpec" }, "LifecycleConfigArns": { @@ -235202,7 +235196,7 @@ }, "DefaultResourceSpec": { "$ref": "#/definitions/AWS::SageMaker::Domain.ResourceSpec", - "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS Command Line Interface or AWS CloudFormation and the instance type parameter value is not passed.", + "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here.
The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.", "title": "DefaultResourceSpec" } }, @@ -239325,7 +239319,7 @@ }, "EndpointInput": { "$ref": "#/definitions/AWS::SageMaker::ModelExplainabilityJobDefinition.EndpointInput", - "markdownDescription": "", + "markdownDescription": "Input object for the endpoint.", "title": "EndpointInput" } }, @@ -242316,7 +242310,7 @@ "additionalProperties": false, "properties": { "DomainId": { - "markdownDescription": "The ID of the associated Domain.", + "markdownDescription": "The ID of the associated domain.", "title": "DomainId", "type": "string" }, @@ -242415,7 +242409,7 @@ }, "DefaultResourceSpec": { "$ref": "#/definitions/AWS::SageMaker::Space.ResourceSpec", - "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS Command Line Interface or AWS CloudFormation and the instance type parameter value is not passed.", + "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.", "title": "DefaultResourceSpec" } }, @@ -242559,7 +242553,7 @@ "properties": { "DefaultResourceSpec": { "$ref": "#/definitions/AWS::SageMaker::UserProfile.ResourceSpec", - "markdownDescription": "", + "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the Code Editor app.", "title": "DefaultResourceSpec" }, "LifecycleConfigArns": { @@ -242714,7 +242708,7 @@ }, "DefaultResourceSpec": { "$ref": "#/definitions/AWS::SageMaker::UserProfile.ResourceSpec", - "markdownDescription": "", + "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterLab app.", "title": "DefaultResourceSpec" }, "LifecycleConfigArns": { @@ -242752,7 +242746,7 @@ }, "DefaultResourceSpec": { "$ref": "#/definitions/AWS::SageMaker::UserProfile.ResourceSpec", - "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS Command Line Interface or AWS CloudFormation and the instance type parameter value is not passed.", + "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here.
The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.", "title": "DefaultResourceSpec" } }, @@ -242955,7 +242949,7 @@ "type": "array" }, "WorkforceName": { - "markdownDescription": "", + "markdownDescription": "The name of the workforce.", "title": "WorkforceName", "type": "string" }, @@ -259589,7 +259583,7 @@ "type": "array" }, "UserName": { - "markdownDescription": "The user name of the user for the WorkSpace. This user name must exist in the AWS Directory Service directory for the WorkSpace.", + "markdownDescription": "The user name of the user for the WorkSpace. This user name must exist in the AWS Directory Service directory for the WorkSpace.\n\nThe reserved keyword, `[UNDEFINED]` , is used when creating user-decoupled WorkSpaces.", "title": "UserName", "type": "string" }, @@ -259949,7 +259943,7 @@ "properties": { "IdentityProviderDetails": { "additionalProperties": true, - "markdownDescription": "The identity provider details. The following list describes the provider detail keys for each identity provider type.\n\n- For Google and Login with Amazon:\n\n- `client_id`\n- `client_secret`\n- `authorize_scopes`\n- For Facebook:\n\n- `client_id`\n- `client_secret`\n- `authorize_scopes`\n- `api_version`\n- For Sign in with Apple:\n\n- `client_id`\n- `team_id`\n- `key_id`\n- `private_key`\n- `authorize_scopes`\n- For OIDC providers:\n\n- `client_id`\n- `client_secret`\n- `attributes_request_method`\n- `oidc_issuer`\n- `authorize_scopes`\n- `authorize_url` *if not available from discovery URL specified by oidc_issuer key*\n- `token_url` *if not available from discovery URL specified by oidc_issuer key*\n- `attributes_url` *if not available from discovery URL specified by oidc_issuer key*\n- `jwks_uri` *if not available from discovery URL specified by oidc_issuer key*\n- For SAML providers:\n\n- `MetadataFile` OR `MetadataURL`\n- `IDPSignout` *optional*", + "markdownDescription": "The identity provider details. The following list describes the provider detail keys for each identity provider type.\n\n- For Google and Login with Amazon:\n\n- `client_id`\n- `client_secret`\n- `authorize_scopes`\n- For Facebook:\n\n- `client_id`\n- `client_secret`\n- `authorize_scopes`\n- `api_version`\n- For Sign in with Apple:\n\n- `client_id`\n- `team_id`\n- `key_id`\n- `private_key`\n- `authorize_scopes`\n- For OIDC providers:\n\n- `client_id`\n- `client_secret`\n- `attributes_request_method`\n- `oidc_issuer`\n- `authorize_scopes`\n- `authorize_url` *if not available from discovery URL specified by oidc_issuer key*\n- `token_url` *if not available from discovery URL specified by oidc_issuer key*\n- `attributes_url` *if not available from discovery URL specified by oidc_issuer key*\n- `jwks_uri` *if not available from discovery URL specified by oidc_issuer key*\n- For SAML providers:\n\n- `MetadataFile` OR `MetadataURL`\n- `IDPSignout` (boolean) *optional*\n- `IDPInit` (boolean) *optional*\n- `RequestSigningAlgorithm` (string) *optional* - Only accepts `rsa-sha256`\n- `EncryptedResponses` (boolean) *optional*", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" diff --git a/schema_source/cloudformation-docs.json b/schema_source/cloudformation-docs.json index 8270abfe6..b22b27d64 100644 --- a/schema_source/cloudformation-docs.json +++ b/schema_source/cloudformation-docs.json @@ -114,7 +114,7 @@ "CustomObjectIdentifier": "An object identifier (OID) specifying the `AccessMethod` . 
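To illustrate the newly documented SAML provider detail keys above ( `IDPInit` , `RequestSigningAlgorithm` , `EncryptedResponses` ), here is a hypothetical `AWS::Cognito::UserPoolIdentityProvider` ; note that `ProviderDetails` values are strings, and the names and URL are placeholders:

```json
{
  "SamlProvider": {
    "Type": "AWS::Cognito::UserPoolIdentityProvider",
    "Properties": {
      "UserPoolId": { "Ref": "UserPool" },
      "ProviderName": "CorpSAML",
      "ProviderType": "SAML",
      "ProviderDetails": {
        "MetadataURL": "https://idp.example.com/saml/metadata",
        "IDPSignout": "true",
        "RequestSigningAlgorithm": "rsa-sha256",
        "EncryptedResponses": "true"
      }
    }
  }
}
```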
The OID must satisfy the regular expression shown below. For more information, see NIST's definition of [Object Identifier (OID)](https://docs.aws.amazon.com/https://csrc.nist.gov/glossary/term/Object_Identifier) ." }, "AWS::ACMPCA::CertificateAuthority CrlConfiguration": { - "CrlDistributionPointExtensionConfiguration": "", + "CrlDistributionPointExtensionConfiguration": "Configures the default behavior of the CRL Distribution Point extension for certificates issued by your CA. If this field is not provided, then the CRL Distribution Point extension will be present and contain the default CRL URL.", "CustomCname": "Name inserted into the certificate *CRL Distribution Points* extension that enables the use of an alias for the CRL distribution point. Use this value if you don't want the name of your S3 bucket to be public.\n\n> The content of a Canonical Name (CNAME) record must conform to [RFC2396](https://docs.aws.amazon.com/https://www.ietf.org/rfc/rfc2396.txt) restrictions on the use of special characters in URIs. Additionally, the value of the CNAME must not include a protocol prefix such as \"http://\" or \"https://\".", "Enabled": "Boolean value that specifies whether certificate revocation lists (CRLs) are enabled. You can use this value to enable certificate revocation for a new CA when you call the `CreateCertificateAuthority` operation or for an existing CA when you call the `UpdateCertificateAuthority` operation.", "ExpirationInDays": "Validity period of the CRL in days.", @@ -122,7 +122,7 @@ "S3ObjectAcl": "Determines whether the CRL will be publicly readable or privately held in the CRL Amazon S3 bucket. If you choose PUBLIC_READ, the CRL will be accessible over the public internet. If you choose BUCKET_OWNER_FULL_CONTROL, only the owner of the CRL S3 bucket can access the CRL, and your PKI clients may need an alternative method of access.\n\nIf no value is specified, the default is PUBLIC_READ.\n\n*Note:* This default can cause CA creation to fail in some circumstances. If you have enabled the Block Public Access (BPA) feature in your S3 account, then you must specify the value of this parameter as `BUCKET_OWNER_FULL_CONTROL` , and not doing so results in an error. If you have disabled BPA in S3, then you can specify either `BUCKET_OWNER_FULL_CONTROL` or `PUBLIC_READ` as the value.\n\nFor more information, see [Blocking public access to the S3 bucket](https://docs.aws.amazon.com/privateca/latest/userguide/PcaCreateCa.html#s3-bpa) ." }, "AWS::ACMPCA::CertificateAuthority CrlDistributionPointExtensionConfiguration": { - "OmitExtension": "" + "OmitExtension": "Configures whether the CRL Distribution Point extension should be populated with the default URL to the CRL. If set to `true` , then the CDP extension will not be present in any certificates issued by that CA unless otherwise specified through CSR or API passthrough.\n\n> Only set this if you have another way to distribute the CRL Distribution Points for certificates issued by your CA, such as the Matter Distributed Compliance Ledger.\n> \n> This configuration cannot be enabled with a custom CNAME set." }, "AWS::ACMPCA::CertificateAuthority CsrExtensions": { "KeyUsage": "Indicates the purpose of the certificate and of the key contained in the certificate.", @@ -329,6 +329,7 @@ "ConsoleAccess": "Enables access to the ActiveMQ web console for the ActiveMQ user. Does not apply to RabbitMQ brokers.", "Groups": "The list of groups (20 maximum) to which the ActiveMQ user belongs.
This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long. Does not apply to RabbitMQ brokers.", "Password": "The password of the user. This value must be at least 12 characters long, must contain at least 4 unique characters, and must not contain commas, colons, or equal signs (,:=).", + "ReplicationUser": "Defines if this user is intended for CRDR replication purposes.", "Username": "The username of the broker user. For Amazon MQ for ActiveMQ brokers, this value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). For Amazon MQ for RabbitMQ brokers, this value can contain only alphanumeric characters, dashes, periods, underscores (- . _). This value must not contain a tilde (~) character. Amazon MQ prohibits using guest as a valid username. This value must be 2-100 characters long.\n\n> Do not add personally identifiable information (PII) or other confidential or sensitive information in broker usernames. Broker usernames are accessible to other AWS services, including CloudWatch Logs . Broker usernames are not intended to be used for private or sensitive data." }, "AWS::AmazonMQ::Configuration": { @@ -484,6 +485,7 @@ "Key": "The storage key for an Amazon S3 bucket.", "Model": "An Amplify DataStore model.", "Predicates": "A list of predicates for binding a component's properties to data.", + "SlotName": "The name of a component slot.", "UserAttribute": "An authenticated user attribute." }, "AWS::AmplifyUIBuilder::Component ComponentChild": { @@ -491,7 +493,8 @@ "ComponentType": "The type of the child component.", "Events": "Describes the events that can be raised on the child component. Use for the workflow feature in Amplify Studio that allows you to bind events and actions to components.", "Name": "The name of the child component.", - "Properties": "Describes the properties of the child component. You can't specify `tags` as a valid property for `properties` ." + "Properties": "Describes the properties of the child component. You can't specify `tags` as a valid property for `properties` .", + "SourceId": "The unique ID of the child component in its original source system, such as Figma." }, "AWS::AmplifyUIBuilder::Component ComponentConditionProperty": { "Else": "The value to assign to the property if the condition is not met.", @@ -510,6 +513,7 @@ }, "AWS::AmplifyUIBuilder::Component ComponentEvent": { "Action": "The action to perform when a specific event is raised.", + "BindingEvent": "Binds an event to an action on a component. When you specify a `bindingEvent` , the event is called when the action is performed.", "Parameters": "Describes information about the action." }, "AWS::AmplifyUIBuilder::Component ComponentProperty": { @@ -550,6 +554,7 @@ "And": "A list of predicates to combine logically.", "Field": "The field to query.", "Operand": "The value to use when performing the evaluation.", + "OperandType": "The type of value to use when performing the evaluation.", "Operator": "The operator to use to perform the evaluation.", "Or": "A list of predicates to combine logically." }, @@ -630,9 +635,22 @@ "DataSourceType": "The data source type, either an Amplify DataStore model or a custom data type.", "DataTypeName": "The unique name of the data type you are using as the data source for the form."
}, + "AWS::AmplifyUIBuilder::Form FormInputBindingPropertiesValue": { + "BindingProperties": "Describes the properties to customize with data at runtime.", + "Type": "The property type." + }, + "AWS::AmplifyUIBuilder::Form FormInputBindingPropertiesValueProperties": { + "Model": "An Amplify DataStore model." + }, "AWS::AmplifyUIBuilder::Form FormInputValueProperty": { + "BindingProperties": "The information to bind fields to data at runtime.", + "Concat": "A list of form properties to concatenate to create the value to assign to this field property.", "Value": "The value to assign to the input field." }, + "AWS::AmplifyUIBuilder::Form FormInputValuePropertyBindingProperties": { + "Field": "The data field to bind the property to.", + "Property": "The form property to bind to the data field." + }, "AWS::AmplifyUIBuilder::Form FormStyle": { "HorizontalGap": "The spacing for the horizontal gap.", "OuterPadding": "The size of the outer padding for the form.", @@ -655,6 +673,7 @@ "Value": "The complex object." }, "AWS::AmplifyUIBuilder::Form ValueMappings": { + "BindingProperties": "The information to bind fields to data at runtime.", "Values": "The value and display value pairs." }, "AWS::AmplifyUIBuilder::Theme": { @@ -755,7 +774,7 @@ }, "AWS::ApiGateway::Deployment StageDescription": { "AccessLogSetting": "Specifies settings for logging access in this stage.", - "CacheClusterEnabled": "Specifies whether a cache cluster is enabled for the stage.", + "CacheClusterEnabled": "Specifies whether a cache cluster is enabled for the stage. To activate a method-level cache, set `CachingEnabled` to `true` for a method.", "CacheClusterSize": "The size of the stage's cache cluster. For more information, see [cacheClusterSize](https://docs.aws.amazon.com/apigateway/latest/api/API_CreateStage.html#apigw-CreateStage-request-cacheClusterSize) in the *API Gateway API Reference* .", "CacheDataEncrypted": "Indicates whether the cached responses are encrypted.", "CacheTtlInSeconds": "The time-to-live (TTL) period, in seconds, that specifies how long API Gateway caches responses.", @@ -917,7 +936,7 @@ }, "AWS::ApiGateway::Stage": { "AccessLogSetting": "Access log settings, including the access log format and access log destination ARN.", - "CacheClusterEnabled": "Specifies whether a cache cluster is enabled for the stage.", + "CacheClusterEnabled": "Specifies whether a cache cluster is enabled for the stage. To activate a method-level cache, set `CachingEnabled` to `true` for a method.", "CacheClusterSize": "The stage's cache capacity in GB. For more information about choosing a cache size, see [Enabling API caching to enhance responsiveness](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-caching.html) .", "CanarySetting": "Settings for the canary deployment in this stage.", "ClientCertificateId": "The identifier of a client certificate for an API stage.", @@ -1272,13 +1291,13 @@ "Name": "A name for the environment.", "Tags": "Metadata to assign to the environment. Tags help organize and categorize your AWS AppConfig resources. Each tag consists of a key and an optional value, both of which you define." }, - "AWS::AppConfig::Environment Monitors": { + "AWS::AppConfig::Environment Monitor": { "AlarmArn": "Amazon Resource Name (ARN) of the Amazon CloudWatch alarm.", "AlarmRoleArn": "ARN of an AWS Identity and Access Management (IAM) role for AWS AppConfig to monitor `AlarmArn` ." }, - "AWS::AppConfig::Environment Tags": { - "Key": "The key-value string map. 
The valid character set is `[a-zA-Z+-=._:/]` . The tag key can be up to 128 characters and must not start with `aws:` .", - "Value": "The tag value can be up to 256 characters." + "AWS::AppConfig::Environment Tag": { + "Key": "", + "Value": "" }, "AWS::AppConfig::Extension": { "Actions": "The actions defined in the extension.", @@ -2713,8 +2732,8 @@ "TagItems": "The items of the tag." }, "AWS::AppStream::AppBlock TagItems": { - "TagKey": "The key of the tag.", - "TagValue": "The value of the tag." + "Key": "", + "Value": "" }, "AWS::AppStream::AppBlockBuilder": { "AccessEndpoints": "The access endpoints of the app block builder.", @@ -2763,8 +2782,8 @@ "TagItems": "The items of the tag." }, "AWS::AppStream::Application TagItems": { - "TagKey": "The key of the tag.", - "TagValue": "The value of the tag." + "Key": "", + "Value": "" }, "AWS::AppStream::ApplicationEntitlementAssociation": { "ApplicationIdentifier": "The identifier of the application.", @@ -3050,6 +3069,7 @@ "AdditionalAuthenticationProviders": "A list of additional authentication providers for the `GraphqlApi` API.", "ApiType": "The value that indicates whether the GraphQL API is a standard API ( `GRAPHQL` ) or merged API ( `MERGED` ).\n\n*WARNING* : If the `ApiType` has not been defined, *explicitly* setting it to `GRAPHQL` in a template/stack update will result in an API replacement and new DNS values.\n\nThe following values are valid:\n\n`GRAPHQL | MERGED`", "AuthenticationType": "Security configuration for your GraphQL API. For allowed values (such as `API_KEY` , `AWS_IAM` , `AMAZON_COGNITO_USER_POOLS` , `OPENID_CONNECT` , or `AWS_LAMBDA` ), see [Security](https://docs.aws.amazon.com/appsync/latest/devguide/security.html) in the *AWS AppSync Developer Guide* .", + "EnvironmentVariables": "A map containing the list of resources with their properties and environment variables. For more information, see [Environmental variables](https://docs.aws.amazon.com/appsync/latest/devguide/environmental-variables.html) .", "IntrospectionConfig": "Sets the value of the GraphQL API to enable ( `ENABLED` ) or disable ( `DISABLED` ) introspection. If no value is provided, the introspection configuration will be set to `ENABLED` by default. This field will produce an error if the operation attempts to use the introspection feature while this field is disabled.\n\nFor more information about introspection, see [GraphQL introspection](https://docs.aws.amazon.com/https://graphql.org/learn/introspection/) .", "LambdaAuthorizerConfig": "A `LambdaAuthorizerConfig` holds configuration on how to authorize AWS AppSync API access when using the `AWS_LAMBDA` authorizer mode. Be aware that an AWS AppSync API may have only one Lambda authorizer configured at a time.", "LogConfig": "The Amazon CloudWatch Logs configuration.", @@ -3526,8 +3546,8 @@ "Min": "The minimum value in Mbps." }, "AWS::AutoScaling::AutoScalingGroup InstanceMaintenancePolicy": { - "MaxHealthyPercentage": "Specifies the upper threshold as a percentage of the desired capacity of the Auto Scaling group. It represents the maximum percentage of the group that can be in service and healthy, or pending, to support your workload when replacing instances. Value range is 100 to 200. After it's set, a value of `-1` will clear the previously set value.\n\nBoth `MinHealthyPercentage` and `MaxHealthyPercentage` must be specified, and the difference between them cannot be greater than 100. 
A large range increases the number of instances that can be replaced at the same time.", - "MinHealthyPercentage": "Specifies the lower threshold as a percentage of the desired capacity of the Auto Scaling group. It represents the minimum percentage of the group to keep in service, healthy, and ready to use to support your workload when replacing instances. Value range is 0 to 100. After it's set, a value of `-1` will clear the previously set value." + "MaxHealthyPercentage": "Specifies the upper threshold as a percentage of the desired capacity of the Auto Scaling group. It represents the maximum percentage of the group that can be in service and healthy, or pending, to support your workload when replacing instances. Value range is 100 to 200. To clear a previously set value, specify a value of `-1` .\n\nBoth `MinHealthyPercentage` and `MaxHealthyPercentage` must be specified, and the difference between them cannot be greater than 100. A large range increases the number of instances that can be replaced at the same time.", + "MinHealthyPercentage": "Specifies the lower threshold as a percentage of the desired capacity of the Auto Scaling group. It represents the minimum percentage of the group to keep in service, healthy, and ready to use to support your workload when replacing instances. Value range is 0 to 100. To clear a previously set value, specify a value of `-1` ." }, "AWS::AutoScaling::AutoScalingGroup InstanceRequirements": { "AcceleratorCount": "The minimum and maximum number of accelerators (GPUs, FPGAs, or AWS Inferentia chips) for an instance type.\n\nTo exclude accelerator-enabled instance types, set `Max` to `0` .\n\nDefault: No minimum or maximum limits", @@ -3544,13 +3564,14 @@ "InstanceGenerations": "Indicates whether current or previous generation instance types are included.\n\n- For current generation instance types, specify `current` . The current generation includes EC2 instance types currently recommended for use. This typically includes the latest two to three generations in each instance family. For more information, see [Instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) in the *Amazon EC2 User Guide for Linux Instances* .\n- For previous generation instance types, specify `previous` .\n\nDefault: Any current or previous generation", "LocalStorage": "Indicates whether instance types with instance store volumes are included, excluded, or required. For more information, see [Amazon EC2 instance store](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html) in the *Amazon EC2 User Guide for Linux Instances* .\n\nDefault: `included`", "LocalStorageTypes": "Indicates the type of local storage that is required.\n\n- For instance types with hard disk drive (HDD) storage, specify `hdd` .\n- For instance types with solid state drive (SSD) storage, specify `ssd` .\n\nDefault: Any local storage type", + "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice": "[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from either the lowest priced current generation instance types or, failing that, the lowest priced previous generation instance types that match your attributes. 
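In template form, the `InstanceMaintenancePolicy` thresholds described above look like the following hypothetical fragment of `AWS::AutoScaling::AutoScalingGroup` properties; the percentages are illustrative:

```json
{
  "InstanceMaintenancePolicy": {
    "MinHealthyPercentage": 90,
    "MaxHealthyPercentage": 120
  }
}
```

Here the spread between the two thresholds is 30 points, within the documented limit of 100.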
When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage.\n\nTo indicate no price protection threshold, specify a high value, such as `999999` .\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per-vCPU or per-memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, then `SpotMaxPricePercentageOverLowestPrice` is used and the value for that parameter defaults to `100` .", "MemoryGiBPerVCpu": "The minimum and maximum amount of memory per vCPU for an instance type, in GiB.\n\nDefault: No minimum or maximum limits", "MemoryMiB": "The minimum and maximum instance memory size for an instance type, in MiB.", "NetworkBandwidthGbps": "The minimum and maximum amount of network bandwidth, in gigabits per second (Gbps).\n\nDefault: No minimum or maximum limits", "NetworkInterfaceCount": "The minimum and maximum number of network interfaces for an instance type.\n\nDefault: No minimum or maximum limits", - "OnDemandMaxPricePercentageOverLowestPrice": "The price protection threshold for On-Demand Instances. This is the maximum you\u2019ll pay for an On-Demand Instance, expressed as a percentage higher than the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as `999999` .\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per vCPU or per memory price instead of the per instance price.\n\nDefault: `20`", + "OnDemandMaxPricePercentageOverLowestPrice": "[Price protection] The price protection threshold for On-Demand Instances, as a percentage higher than an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from either the lowest priced current generation instance types or, failing that, the lowest priced previous generation instance types that match your attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per instance price.\n\nDefault: `20`", "RequireHibernateSupport": "Indicates whether instance types must provide On-Demand Instance hibernation support.\n\nDefault: `false`", - "SpotMaxPricePercentageOverLowestPrice": "The price protection threshold for Spot Instances. 
This is the maximum you\u2019ll pay for a Spot Instance, expressed as a percentage higher than the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as `999999` .\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per vCPU or per memory price instead of the per instance price.\n\nDefault: `100`", + "SpotMaxPricePercentageOverLowestPrice": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from either the lowest priced current generation instance types or, failing that, the lowest priced previous generation instance types that match your attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per-vCPU or per-memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. \n\nDefault: `100`", "TotalLocalStorageGB": "The minimum and maximum total local storage size for an instance type, in GB.\n\nDefault: No minimum or maximum limits", "VCpuCount": "The minimum and maximum number of vCPUs for an instance type." }, @@ -3961,7 +3982,8 @@ }, "AWS::Backup::BackupPlan LifecycleResourceType": { "DeleteAfterDays": "Specifies the number of days after creation that a recovery point is deleted. Must be greater than `MoveToColdStorageAfterDays` .", - "MoveToColdStorageAfterDays": "Specifies the number of days after creation that a recovery point is moved to cold storage." + "MoveToColdStorageAfterDays": "Specifies the number of days after creation that a recovery point is moved to cold storage.", + "OptInToArchiveForSupportedResources": "" }, "AWS::Backup::BackupSelection": { "BackupPlanId": "Uniquely identifies a backup plan.", @@ -4625,18 +4647,30 @@ "Value": "The value of the tag. Tag values are case-sensitive and can be null." }, "AWS::Cassandra::Table": { + "AutoScalingSpecifications": "The optional auto scaling capacity settings for a table in provisioned capacity mode.", "BillingMode": "The billing mode for the table, which determines how you'll be charged for reads and writes:\n\n- *On-demand mode* (default) - You pay based on the actual reads and writes your application performs.\n- *Provisioned mode* - Lets you specify the number of reads and writes per second that you need for your application.\n\nIf you don't specify a value for this property, then the table will use on-demand mode.", "ClientSideTimestampsEnabled": "Enables client-side timestamps for the table. 
By default, the setting is disabled. You can enable client-side timestamps with the following option:\n\n- `status: \"enabled\"`\n\nAfter client-side timestamps are enabled for a table, you can't disable this setting.", "ClusteringKeyColumns": "One or more columns that determine how the table data is sorted.", "DefaultTimeToLive": "The default Time To Live (TTL) value for all rows in a table in seconds. The maximum configurable value is 630,720,000 seconds, which is the equivalent of 20 years. By default, the TTL value for a table is 0, which means data does not expire.\n\nFor more information, see [Setting the default TTL value for a table](https://docs.aws.amazon.com/keyspaces/latest/devguide/TTL-how-it-works.html#ttl-howitworks_default_ttl) in the *Amazon Keyspaces Developer Guide* .", - "EncryptionSpecification": "The encryption at rest options for the table.\n\n- *AWS owned key* (default) - The key is owned by Amazon Keyspaces.\n- *Customer managed key* - The key is stored in your account and is created, owned, and managed by you.\n\n> If you choose encryption with a customer managed key, you must specify a valid customer managed KMS key with permissions granted to Amazon Keyspaces.\n\nFor more information, see [Encryption at rest in Amazon Keyspaces](https://docs.aws.amazon.com/keyspaces/latest/devguide/EncryptionAtRest.html) in the *Amazon Keyspaces Developer Guide* .", + "EncryptionSpecification": "The encryption at rest options for the table.\n\n- *AWS owned key* (default) - The key is owned by Amazon Keyspaces .\n- *Customer managed key* - The key is stored in your account and is created, owned, and managed by you.\n\n> If you choose encryption with a customer managed key, you must specify a valid customer managed KMS key with permissions granted to Amazon Keyspaces.\n\nFor more information, see [Encryption at rest in Amazon Keyspaces](https://docs.aws.amazon.com/keyspaces/latest/devguide/EncryptionAtRest.html) in the *Amazon Keyspaces Developer Guide* .", "KeyspaceName": "The name of the keyspace to create the table in. The keyspace must already exist.", "PartitionKeyColumns": "One or more columns that uniquely identify every row in the table. Every table must have a partition key.", "PointInTimeRecoveryEnabled": "Specifies if point-in-time recovery is enabled or disabled for the table. The options are `PointInTimeRecoveryEnabled=true` and `PointInTimeRecoveryEnabled=false` . If not specified, the default is `PointInTimeRecoveryEnabled=false` .", "RegularColumns": "One or more columns that are not part of the primary key - that is, columns that are *not* defined as partition key columns or clustering key columns.\n\nYou can add regular columns to existing tables by adding them to the template.", + "ReplicaSpecifications": "The AWS Region specific settings of a multi-Region table.\n\nFor a multi-Region table, you can configure the table's read capacity differently per AWS Region. You can do this by configuring the following parameters.\n\n- `region` : The Region where these settings are applied. (Required)\n- `readCapacityUnits` : The provisioned read capacity units. (Optional)\n- `readCapacityAutoScaling` : The read capacity auto scaling settings for the table. (Optional)", "TableName": "The name of the table to be created. The table name is case sensitive. If you don't specify a name, AWS CloudFormation generates a unique ID and uses that ID for the table name. 
For more information, see [Name type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) .\n\n> If you specify a name, you can't perform updates that require replacing this resource. You can perform updates that require no interruption or some interruption. If you must replace the resource, specify a new name. \n\n*Length constraints:* Minimum length of 3. Maximum length of 255.\n\n*Pattern:* `^[a-zA-Z0-9][a-zA-Z0-9_]{1,47}$`", "Tags": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) ." }, + "AWS::Cassandra::Table AutoScalingSetting": { + "AutoScalingDisabled": "This optional parameter enables auto scaling for the table if set to `false` .", + "MaximumUnits": "Manage costs by specifying the maximum amount of throughput to provision. The value must be between 1 and the max throughput per second quota for your account (40,000 by default).", + "MinimumUnits": "The minimum level of throughput the table should always be ready to support. The value must be between 1 and the max throughput per second quota for your account (40,000 by default).", + "ScalingPolicy": "Amazon Keyspaces supports the `target tracking` auto scaling policy. With this policy, Amazon Keyspaces auto scaling ensures that the table's ratio of consumed to provisioned capacity stays at or near the target value that you specify. You define the target value as a percentage between 20 and 90." + }, + "AWS::Cassandra::Table AutoScalingSpecification": { + "ReadCapacityAutoScaling": "The auto scaling settings for the table's read capacity.", + "WriteCapacityAutoScaling": "The auto scaling settings for the table's write capacity." + }, "AWS::Cassandra::Table BillingMode": { "Mode": "The billing mode for the table:\n\n- On-demand mode - `ON_DEMAND`\n- Provisioned mode - `PROVISIONED`\n\n> If you choose `PROVISIONED` mode, then you also need to specify provisioned throughput (read and write capacity) for the table.\n\nValid values: `ON_DEMAND` | `PROVISIONED`", "ProvisionedThroughput": "The provisioned read capacity and write capacity for the table. For more information, see [Provisioned throughput capacity mode](https://docs.aws.amazon.com/keyspaces/latest/devguide/ReadWriteCapacityMode.html#ReadWriteCapacityMode.Provisioned) in the *Amazon Keyspaces Developer Guide* ." @@ -4657,10 +4691,24 @@ "ReadCapacityUnits": "The amount of read capacity that's provisioned for the table. For more information, see [Read/write capacity mode](https://docs.aws.amazon.com/keyspaces/latest/devguide/ReadWriteCapacityMode.html) in the *Amazon Keyspaces Developer Guide* .", "WriteCapacityUnits": "The amount of write capacity that's provisioned for the table. For more information, see [Read/write capacity mode](https://docs.aws.amazon.com/keyspaces/latest/devguide/ReadWriteCapacityMode.html) in the *Amazon Keyspaces Developer Guide* ." }, + "AWS::Cassandra::Table ReplicaSpecification": { + "ReadCapacityAutoScaling": "The read capacity auto scaling settings for the multi-Region table in the specified AWS Region.", + "ReadCapacityUnits": "The provisioned read capacity units for the multi-Region table in the specified AWS Region.", + "Region": "The AWS Region." + }, + "AWS::Cassandra::Table ScalingPolicy": { + "TargetTrackingScalingPolicyConfiguration": "The auto scaling policy that scales a table based on the ratio of consumed to provisioned capacity." 
+ }, "AWS::Cassandra::Table Tag": { "Key": "The key of the tag. Tag keys are case sensitive. Each Amazon Keyspaces resource can only have up to one tag with the same key. If you try to add an existing tag (same key), the existing tag value will be updated to the new value.", "Value": "The value of the tag. Tag values are case-sensitive and can be null." }, + "AWS::Cassandra::Table TargetTrackingScalingPolicyConfiguration": { + "DisableScaleIn": "Specifies if `scale-in` is enabled.\n\nWhen auto scaling automatically decreases capacity for a table, the table *scales in* . When scaling policies are set, they can't scale in the table lower than its minimum capacity.", + "ScaleInCooldown": "Specifies a `scale-in` cool down period.\n\nA cooldown period in seconds between scaling activities that lets the table stabilize before another scaling activity starts.", + "ScaleOutCooldown": "Specifies a scale out cool down period.\n\nA cooldown period in seconds between scaling activities that lets the table stabilize before another scaling activity starts.", + "TargetValue": "Specifies the target value for the target tracking auto scaling policy.\n\nAmazon Keyspaces auto scaling scales up capacity automatically when traffic exceeds this target utilization rate, and then back down when it falls below the target. This ensures that the ratio of consumed capacity to provisioned capacity stays at or near this value. You define `targetValue` as a percentage. An `integer` between 20 and 90." + }, "AWS::CertificateManager::Account": { "ExpiryEventsConfiguration": "Object containing expiration events options associated with an AWS account . For more information, see [ExpiryEventsConfiguration](https://docs.aws.amazon.com/acm/latest/APIReference/API_ExpiryEventsConfiguration.html) in the API reference." }, @@ -5182,7 +5230,7 @@ "DefaultTTL": "This field is deprecated. We recommend that you use the `DefaultTTL` field in a cache policy instead of this field. For more information, see [Creating cache policies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html#cache-key-create-cache-policy) or [Using the managed cache policies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-managed-cache-policies.html) in the *Amazon CloudFront Developer Guide* .\n\nThe default amount of time that you want objects to stay in CloudFront caches before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin does not add HTTP headers such as `Cache-Control max-age` , `Cache-Control s-maxage` , and `Expires` to objects. For more information, see [Managing How Long Content Stays in an Edge Cache (Expiration)](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) in the *Amazon CloudFront Developer Guide* .", "FieldLevelEncryptionId": "The value of `ID` for the field-level encryption configuration that you want CloudFront to use for encrypting specific fields of data for the default cache behavior.", "ForwardedValues": "This field is deprecated. We recommend that you use a cache policy or an origin request policy instead of this field. For more information, see [Working with policies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/working-with-policies.html) in the *Amazon CloudFront Developer Guide* .\n\nIf you want to include values in the cache key, use a cache policy. 
For more information, see [Creating cache policies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html#cache-key-create-cache-policy) or [Using the managed cache policies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-managed-cache-policies.html) in the *Amazon CloudFront Developer Guide* .\n\nIf you want to send values to the origin but not include them in the cache key, use an origin request policy. For more information, see [Creating origin request policies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-origin-requests.html#origin-request-create-origin-request-policy) or [Using the managed origin request policies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-managed-origin-request-policies.html) in the *Amazon CloudFront Developer Guide* .\n\nA `DefaultCacheBehavior` must include either a `CachePolicyId` or `ForwardedValues` . We recommend that you use a `CachePolicyId` .\n\nA complex type that specifies how CloudFront handles query strings, cookies, and HTTP headers.", - "FunctionAssociations": "A list of CloudFront functions that are associated with this cache behavior. CloudFront functions must be published to the `LIVE` stage to associate them with a cache behavior.", + "FunctionAssociations": "A list of CloudFront functions that are associated with this cache behavior. Your functions must be published to the `LIVE` stage to associate them with a cache behavior.", "LambdaFunctionAssociations": "A complex type that contains zero or more Lambda@Edge function associations for a cache behavior.", "MaxTTL": "This field is deprecated. We recommend that you use the `MaxTTL` field in a cache policy instead of this field. For more information, see [Creating cache policies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html#cache-key-create-cache-policy) or [Using the managed cache policies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-managed-cache-policies.html) in the *Amazon CloudFront Developer Guide* .\n\nThe maximum amount of time that you want objects to stay in CloudFront caches before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin adds HTTP headers such as `Cache-Control max-age` , `Cache-Control s-maxage` , and `Expires` to objects. For more information, see [Managing How Long Content Stays in an Edge Cache (Expiration)](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) in the *Amazon CloudFront Developer Guide* .", "MinTTL": "This field is deprecated. We recommend that you use the `MinTTL` field in a cache policy instead of this field. For more information, see [Creating cache policies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html#cache-key-create-cache-policy) or [Using the managed cache policies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-managed-cache-policies.html) in the *Amazon CloudFront Developer Guide* .\n\nThe minimum amount of time that you want objects to stay in CloudFront caches before CloudFront forwards another request to your origin to determine whether the object has been updated. 
For more information, see [Managing How Long Content Stays in an Edge Cache (Expiration)](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) in the *Amazon CloudFront Developer Guide* .\n\nYou must specify `0` for `MinTTL` if you configure CloudFront to forward all headers to your origin (under `Headers` , if you specify `1` for `Quantity` and `*` for `Name` ).", @@ -5216,7 +5264,7 @@ "S3Origin": "", "Staging": "A Boolean that indicates whether this is a staging distribution. When this value is `true` , this is a staging distribution. When this value is `false` , this is not a staging distribution.", "ViewerCertificate": "A complex type that determines the distribution's SSL/TLS configuration for communicating with viewers.", - "WebACLId": "A unique identifier that specifies the AWS WAF web ACL, if any, to associate with this distribution. To specify a web ACL created using the latest version of AWS WAF , use the ACL ARN, for example `arn:aws:wafv2:us-east-1:123456789012:global/webacl/ExampleWebACL/473e64fd-f30b-4765-81a0-62ad96dd167a` . To specify a web ACL created using AWS WAF Classic, use the ACL ID, for example `473e64fd-f30b-4765-81a0-62ad96dd167a` .\n\nAWS WAF is a web application firewall that lets you monitor the HTTP and HTTPS requests that are forwarded to CloudFront, and lets you control access to your content. Based on conditions that you specify, such as the IP addresses that requests originate from or the values of query strings, CloudFront responds to requests either with the requested content or with an HTTP 403 status code (Forbidden). You can also configure CloudFront to return a custom error page when a request is blocked. For more information about AWS WAF , see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/what-is-aws-waf.html) ." + "WebACLId": "A unique identifier that specifies the AWS WAF web ACL, if any, to associate with this distribution. To specify a web ACL created using the latest version of AWS WAF , use the ACL ARN, for example `arn:aws:wafv2:us-east-1:123456789012:global/webacl/ExampleWebACL/a1b2c3d4-5678-90ab-cdef-EXAMPLE11111` . To specify a web ACL created using AWS WAF Classic, use the ACL ID, for example `a1b2c3d4-5678-90ab-cdef-EXAMPLE11111` .\n\nAWS WAF is a web application firewall that lets you monitor the HTTP and HTTPS requests that are forwarded to CloudFront, and lets you control access to your content. Based on conditions that you specify, such as the IP addresses that requests originate from or the values of query strings, CloudFront responds to requests either with the requested content or with an HTTP 403 status code (Forbidden). You can also configure CloudFront to return a custom error page when a request is blocked. For more information about AWS WAF , see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/what-is-aws-waf.html) ." }, "AWS::CloudFront::Distribution ForwardedValues": { "Cookies": "This field is deprecated. We recommend that you use a cache policy or an origin request policy instead of this field.\n\nIf you want to include cookies in the cache key, use a cache policy. For more information, see [Creating cache policies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html#cache-key-create-cache-policy) in the *Amazon CloudFront Developer Guide* .\n\nIf you want to send cookies to the origin but not include them in the cache key, use an origin request policy. 
For more information, see [Creating origin request policies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-origin-requests.html#origin-request-create-origin-request-policy) in the *Amazon CloudFront Developer Guide* .\n\nA complex type that specifies whether you want CloudFront to forward cookies to the origin and, if so, which ones. For more information about forwarding cookies to the origin, see [How CloudFront Forwards, Caches, and Logs Cookies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Cookies.html) in the *Amazon CloudFront Developer Guide* .", @@ -5322,14 +5370,14 @@ }, "AWS::CloudFront::Function FunctionConfig": { "Comment": "A comment to describe the function.", - "KeyValueStoreAssociations": "The configuration for the Key Value Store associations.", + "KeyValueStoreAssociations": "The configuration for the key value store associations.", "Runtime": "The function's runtime environment version." }, "AWS::CloudFront::Function FunctionMetadata": { "FunctionARN": "The Amazon Resource Name (ARN) of the function. The ARN uniquely identifies the function." }, "AWS::CloudFront::Function KeyValueStoreAssociation": { - "KeyValueStoreARN": "The Amazon Resource Name (ARN) of the Key Value Store association." + "KeyValueStoreARN": "The Amazon Resource Name (ARN) of the key value store association." }, "AWS::CloudFront::KeyGroup": { "KeyGroupConfig": "The key group configuration." @@ -5340,13 +5388,13 @@ "Name": "A name to identify the key group." }, "AWS::CloudFront::KeyValueStore": { - "Comment": "A comment for the Key Value Store.", - "ImportSource": "The import source for the Key Value Store.", - "Name": "The name of the Key Value Store." + "Comment": "A comment for the key value store.", + "ImportSource": "The import source for the key value store.", + "Name": "The name of the key value store." }, "AWS::CloudFront::KeyValueStore ImportSource": { - "SourceArn": "The Amazon Resource Name (ARN) of the import source for the Key Value Store.", - "SourceType": "The source type of the import source for the Key Value Store." + "SourceArn": "The Amazon Resource Name (ARN) of the import source for the key value store.", + "SourceType": "The source type of the import source for the key value store." }, "AWS::CloudFront::MonitoringSubscription": { "DistributionId": "The ID of the distribution that you are enabling metrics for.", @@ -5363,7 +5411,7 @@ }, "AWS::CloudFront::OriginAccessControl OriginAccessControlConfig": { "Description": "A description of the origin access control.", - "Name": "A name to identify the origin access control.", + "Name": "A name to identify the origin access control. You can specify up to 64 characters.", "OriginAccessControlOriginType": "The type of origin that this origin access control is for.", "SigningBehavior": "Specifies which requests CloudFront signs (adds authentication information to). Specify `always` for the most common use case. For more information, see [origin access control advanced settings](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-restricting-access-to-s3.html#oac-advanced-settings) in the *Amazon CloudFront Developer Guide* .\n\nThis field can have one of the following values:\n\n- `always` \u2013 CloudFront signs all origin requests, overwriting the `Authorization` header from the viewer request if one exists.\n- `never` \u2013 CloudFront doesn't sign any origin requests. 
This value turns off origin access control for all origins in all distributions that use this origin access control.\n- `no-override` \u2013 If the viewer request doesn't contain the `Authorization` header, then CloudFront signs the origin request. If the viewer request contains the `Authorization` header, then CloudFront doesn't sign the origin request and instead passes along the `Authorization` header from the viewer request. *WARNING: To pass along the `Authorization` header from the viewer request, you *must* add the `Authorization` header to a [cache policy](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html) for all cache behaviors that use origins associated with this origin access control.*", "SigningProtocol": "The signing protocol of the origin access control, which determines how CloudFront signs (authenticates) requests. The only valid value is `sigv4` ." @@ -5480,7 +5528,7 @@ "ContentTypeOptions": "Determines whether CloudFront includes the `X-Content-Type-Options` HTTP response header with its value set to `nosniff` .\n\nFor more information about the `X-Content-Type-Options` HTTP response header, see [X-Content-Type-Options](https://docs.aws.amazon.com/https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Content-Type-Options) in the MDN Web Docs.", "FrameOptions": "Determines whether CloudFront includes the `X-Frame-Options` HTTP response header and the header's value.\n\nFor more information about the `X-Frame-Options` HTTP response header, see [X-Frame-Options](https://docs.aws.amazon.com/https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options) in the MDN Web Docs.", "ReferrerPolicy": "Determines whether CloudFront includes the `Referrer-Policy` HTTP response header and the header's value.\n\nFor more information about the `Referrer-Policy` HTTP response header, see [Referrer-Policy](https://docs.aws.amazon.com/https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Referrer-Policy) in the MDN Web Docs.", - "StrictTransportSecurity": "Determines whether CloudFront includes the `Strict-Transport-Security` HTTP response header and the header's value.\n\nFor more information about the `Strict-Transport-Security` HTTP response header, see [Strict-Transport-Security](https://docs.aws.amazon.com/https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security) in the MDN Web Docs.", + "StrictTransportSecurity": "Determines whether CloudFront includes the `Strict-Transport-Security` HTTP response header and the header's value.\n\nFor more information about the `Strict-Transport-Security` HTTP response header, see [Security headers](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/understanding-response-headers-policies.html#understanding-response-headers-policies-security) in the *Amazon CloudFront Developer Guide* and [Strict-Transport-Security](https://docs.aws.amazon.com/https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security) in the MDN Web Docs.", "XSSProtection": "Determines whether CloudFront includes the `X-XSS-Protection` HTTP response header and the header's value.\n\nFor more information about the `X-XSS-Protection` HTTP response header, see [X-XSS-Protection](https://docs.aws.amazon.com/https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-XSS-Protection) in the MDN Web Docs." 
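The security headers above nest under `SecurityHeadersConfig` inside `ResponseHeadersPolicyConfig`. A minimal sketch, assuming illustrative header values rather than defaults:

```yaml
# Sketch: response headers policy emitting the security headers described above.
# Policy name and header values are placeholders.
SecurityHeadersPolicy:
  Type: AWS::CloudFront::ResponseHeadersPolicy
  Properties:
    ResponseHeadersPolicyConfig:
      Name: example-security-headers
      SecurityHeadersConfig:
        StrictTransportSecurity:
          AccessControlMaxAgeSec: 63072000   # max-age in seconds (two years)
          IncludeSubdomains: true
          Preload: true
          Override: true                     # replace any origin-supplied header
        ContentTypeOptions:                  # value is always "nosniff"
          Override: true
        FrameOptions:
          FrameOption: DENY
          Override: true
        ReferrerPolicy:
          ReferrerPolicy: strict-origin-when-cross-origin
          Override: true
        XSSProtection:
          Protection: true
          ModeBlock: true
          Override: true
```

`Override` controls whether CloudFront replaces a header the origin already set; leaving it `false` keeps the origin's value.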
}, "AWS::CloudFront::ResponseHeadersPolicy ServerTimingHeadersConfig": { @@ -5864,6 +5912,7 @@ "Certificate": "The ARN of the Amazon S3 bucket, path prefix, and object key that contains the PEM-encoded certificate for the build project. For more information, see [certificate](https://docs.aws.amazon.com/codebuild/latest/userguide/create-project-cli.html#cli.environment.certificate) in the *AWS CodeBuild User Guide* .", "ComputeType": "The type of compute environment. This determines the number of CPU cores and memory the build environment uses. Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 15 GB memory and 8 vCPUs for builds.\n\nFor more information, see [Build Environment Compute Types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*", "EnvironmentVariables": "A set of environment variables to make available to builds for this build project.", + "Fleet:": "", "Image": "The image tag or image digest that identifies the Docker image to use for this build project. Use the following formats:\n\n- For an image tag: `/:` . For example, in the Docker repository that CodeBuild uses to manage its Docker images, this would be `aws/codebuild/standard:4.0` .\n- For an image digest: `/@` . For example, to specify an image with the digest \"sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf,\" use `/@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf` .\n\nFor more information, see [Docker images provided by CodeBuild](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-available.html) in the *AWS CodeBuild user guide* .", "ImagePullCredentialsType": "The type of credentials AWS CodeBuild uses to pull images in your build. There are two valid values:\n\n- `CODEBUILD` specifies that AWS CodeBuild uses its own credentials. This requires that you modify your ECR repository policy to trust AWS CodeBuild service principal.\n- `SERVICE_ROLE` specifies that AWS CodeBuild uses your build project's service role.\n\nWhen you use a cross-account or private registry image, you must use SERVICE_ROLE credentials. When you use an AWS CodeBuild curated image, you must use CODEBUILD credentials.", "PrivilegedMode": "Enables running the Docker daemon inside a Docker container. Set to true only if the build project is used to build Docker images. Otherwise, a build that attempts to interact with the Docker daemon fails. 
The default setting is `false` .\n\nYou can initialize the Docker daemon during the install phase of your build by adding one of the following sets of commands to the install phase of your buildspec file:\n\nIf the operating system's base image is Ubuntu Linux:\n\n`- nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 --storage-driver=overlay&`\n\n`- timeout 15 sh -c \"until docker info; do echo .; sleep 1; done\"`\n\nIf the operating system's base image is Alpine Linux and the previous command does not work, add the `-t` argument to `timeout` :\n\n`- nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 --storage-driver=overlay&`\n\n`- timeout -t 15 sh -c \"until docker info; do echo .; sleep 1; done\"`", @@ -5901,6 +5950,9 @@ "MountPoint": "The location in the container where you mount the file system.", "Type": "The type of the file system. The one supported type is `EFS` ." }, + "AWS::CodeBuild::Project ProjectFleet": { + "FleetArn": "Specifies the compute fleet ARN for the build project." + }, "AWS::CodeBuild::Project ProjectSourceVersion": { "SourceIdentifier": "An identifier for a source in the build project. The identifier can only contain alphanumeric characters and underscores, and must be less than 128 characters in length.", "SourceVersion": "The source version for the corresponding source identifier. If specified, must be one of:\n\n- For CodeCommit: the commit ID, branch, or Git tag to use.\n- For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format `pr/pull-request-ID` (for example, `pr/25` ). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.\n- For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.\n- For Amazon S3: the version ID of the object that represents the build input ZIP file to use.\n\nFor more information, see [Source Version Sample with CodeBuild](https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) in the *AWS CodeBuild User Guide* ." @@ -6245,7 +6297,7 @@ "ArtifactStores": "A mapping of `artifactStore` objects and their corresponding AWS Regions. There must be an artifact store for the pipeline Region and for each cross-region action in the pipeline.\n\n> You must include either `artifactStore` or `artifactStores` in your pipeline, but you cannot use both. 
If you create a cross-region action in your pipeline, you must use `artifactStores` .", "DisableInboundStageTransitions": "Represents the input of a `DisableStageTransition` action.", "Name": "The name of the pipeline.", - "PipelineType": "CodePipeline provides the following pipeline types, which differ in characteristics and price, so that you can tailor your pipeline features and cost to the needs of your applications.\n\n- V1 type pipelines have a JSON structure that contains standard pipeline, stage, and action-level parameters.\n- V2 type pipelines have the same structure as a V1 type, along with additional parameters for release safety and trigger configuration.\n\n> Including V2 parameters, such as triggers on Git tags, in the pipeline JSON when creating or updating a pipeline will result in the pipeline having the V2 type of pipeline and the associated costs. \n\nFor information about pricing for CodePipeline, see [Pricing](https://docs.aws.amazon.com/https://aws.amazon.com/codepipeline/pricing/) .\n\nFor information about which type of pipeline to choose, see [What type of pipeline is right for me?](https://docs.aws.amazon.com/codepipeline/latest/userguide/pipeline-types-planning.html) .", + "PipelineType": "CodePipeline provides the following pipeline types, which differ in characteristics and price, so that you can tailor your pipeline features and cost to the needs of your applications.\n\n- V1 type pipelines have a JSON structure that contains standard pipeline, stage, and action-level parameters.\n- V2 type pipelines have the same structure as a V1 type, along with additional parameters for release safety and trigger configuration.\n\n> Including V2 parameters, such as triggers on Git tags, in the pipeline JSON when creating or updating a pipeline will result in the pipeline having the V2 type of pipeline and the associated costs. \n\nFor information about pricing for CodePipeline, see [Pricing](https://docs.aws.amazon.com/codepipeline/pricing/) .\n\nFor information about which type of pipeline to choose, see [What type of pipeline is right for me?](https://docs.aws.amazon.com/codepipeline/latest/userguide/pipeline-types-planning.html) .", "RestartExecutionOnUpdate": "Indicates whether to rerun the CodePipeline pipeline after you update it.", "RoleArn": "The Amazon Resource Name (ARN) for CodePipeline to use to either perform actions with no `actionRoleArn` , or to use to assume roles for actions with an `actionRoleArn` .", "Stages": "Represents information about a stage and its definition.", @@ -6288,7 +6340,7 @@ "Type": "The type of encryption key, such as an AWS KMS key. When creating or updating a pipeline, the value must be set to 'KMS'." }, "AWS::CodePipeline::Pipeline GitConfiguration": { - "Push": "The field where the repository event that will start the pipeline, such as pushing Git tags, is specified with details.\n\n> Git tags is the only supported event type.", + "Push": "The field where the repository event that will start the pipeline, such as pushing Git tags, is specified with details.", "SourceActionName": "The name of the pipeline source action where the trigger configuration, such as Git tags, is specified. The trigger configuration will start the pipeline upon the specified change only.\n\n> You can only specify one trigger configuration per source action." }, "AWS::CodePipeline::Pipeline GitPushFilter": { @@ -6392,20 +6444,20 @@ "SyncType": "The type of sync for a specific sync configuration." 
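The V2 `PipelineType` and the `GitConfiguration` trigger fields documented above work together: triggers are only valid on V2 pipelines, and each trigger configuration names the source action it watches. A minimal sketch, assuming a pipeline role and artifact bucket defined elsewhere in the template and placeholder tag patterns:

```yaml
# Sketch: a V2 pipeline started by pushed Git tags.
ExamplePipeline:
  Type: AWS::CodePipeline::Pipeline
  Properties:
    Name: example-pipeline
    PipelineType: V2                      # Git-tag triggers require a V2 pipeline
    RoleArn: !GetAtt PipelineRole.Arn     # assumed to exist elsewhere
    ArtifactStore:
      Type: S3
      Location: !Ref ArtifactBucket       # assumed to exist elsewhere
    Triggers:
      - ProviderType: CodeStarSourceConnection
        GitConfiguration:
          SourceActionName: Source        # one trigger configuration per source action
          Push:
            - Tags:
                Includes:
                  - release-*
                Excludes:
                  - release-*-beta
    Stages: []                            # source/build/deploy stages omitted from this sketch
```

The `Tags` include and exclude lists follow the `GitPushFilter` shape referenced above; the patterns shown are placeholders.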
}, "AWS::CodeStarNotifications::NotificationRule": { - "CreatedBy": "", + "CreatedBy": "The name or email alias of the person who created the notification rule.", "DetailType": "The level of detail to include in the notifications for this resource. `BASIC` will include only the contents of the event as it would appear in Amazon CloudWatch. `FULL` will include any supplemental information provided by AWS CodeStar Notifications and/or the service for the resource for which the notification is created.", - "EventTypeId": "", + "EventTypeId": "The event type associated with this notification rule. For a complete list of event types and IDs, see [Notification concepts](https://docs.aws.amazon.com/dtconsole/latest/userguide/concepts.html#concepts-api) in the *Developer Tools Console User Guide* .", "EventTypeIds": "A list of event types associated with this notification rule. For a complete list of event types and IDs, see [Notification concepts](https://docs.aws.amazon.com/dtconsole/latest/userguide/concepts.html#concepts-api) in the *Developer Tools Console User Guide* .", "Name": "The name for the notification rule. Notification rule names must be unique in your AWS account .", "Resource": "The Amazon Resource Name (ARN) of the resource to associate with the notification rule. Supported resources include pipelines in AWS CodePipeline , repositories in AWS CodeCommit , and build projects in AWS CodeBuild .", "Status": "The status of the notification rule. The default value is `ENABLED` . If the status is set to `DISABLED` , notifications aren't sent for the notification rule.", "Tags": "A list of tags to apply to this notification rule. Key names cannot start with \" `aws` \".", - "TargetAddress": "", - "Targets": "A list of Amazon Resource Names (ARNs) of Amazon Simple Notification Service topics and AWS Chatbot clients to associate with the notification rule." + "TargetAddress": "The Amazon Resource Name (ARN) of the Amazon SNS topic or AWS Chatbot client.", + "Targets": "A list of Amazon Resource Names (ARNs) of Amazon SNS topics and AWS Chatbot clients to associate with the notification rule." }, "AWS::CodeStarNotifications::NotificationRule Target": { "TargetAddress": "The Amazon Resource Name (ARN) of the AWS Chatbot topic or AWS Chatbot client.", - "TargetType": "The target type. Can be an Amazon Simple Notification Service topic or AWS Chatbot client.\n\n- Amazon Simple Notification Service topics are specified as `SNS` .\n- AWS Chatbot clients are specified as `AWSChatbotSlack` ." + "TargetType": "The target type. Can be an Amazon Simple Notification Service topic or AWS Chatbot client.\n\n- Amazon Simple Notification Service topics are specified as `SNS` .\n- AWS Chatbot clients are specified as `AWSChatbotSlack` .\n- AWS Chatbot clients for Microsoft Teams are specified as `AWSChatbotMicrosoftTeams` ." }, "AWS::Cognito::IdentityPool": { "AllowClassicFlow": "Enables the Basic (Classic) authentication flow.", @@ -6452,10 +6504,10 @@ "Value": "A brief string that the claim must match. For example, \"paid\" or \"yes\"." }, "AWS::Cognito::IdentityPoolRoleAttachment RoleMapping": { - "AmbiguousRoleResolution": "Specifies the action to be taken if either no rules match the claim value for the Rules type, or there is no `cognito:preferred_role` claim and there are multiple `cognito:roles` matches for the Token type. 
If you specify Token or Rules as the Type, AmbiguousRoleResolution is required.\n\nValid values are `AuthenticatedRole` or `Deny` .", + "AmbiguousRoleResolution": "If you specify Token or Rules as the `Type` , `AmbiguousRoleResolution` is required.\n\nSpecifies the action to be taken if either no rules match the claim value for the `Rules` type, or there is no `cognito:preferred_role` claim and there are multiple `cognito:roles` matches for the `Token` type.", "IdentityProvider": "Identifier for the identity provider for which the role is mapped. For example: `graph.facebook.com` or `cognito-idp.us-east-1.amazonaws.com/us-east-1_abcdefghi:app_client_id (http://cognito-idp.us-east-1.amazonaws.com/us-east-1_abcdefghi:app_client_id)` . This is the identity provider that is used by the user for authentication.\n\nIf the identity provider property isn't provided, the key of the entry in the `RoleMappings` map is used as the identity provider.", "RulesConfiguration": "The rules to be used for mapping users to roles. If you specify \"Rules\" as the role-mapping type, RulesConfiguration is required.", - "Type": "The role-mapping type. `Token` uses `cognito:roles` and `cognito:preferred_role` claims from the Amazon Cognito identity provider token to map groups to roles. `Rules` attempts to match claims from the token to map to a role.\n\nValid values are `Token` or `Rules` ." + "Type": "The role mapping type. Token will use `cognito:roles` and `cognito:preferred_role` claims from the Cognito identity provider token to map groups to roles. Rules will attempt to match claims from the token to map to a role." }, "AWS::Cognito::IdentityPoolRoleAttachment RulesConfigurationType": { "Rules": "The rules. You can specify up to 25 rules per identity provider." }, @@ -6477,7 +6529,7 @@ "AdminCreateUserConfig": "The configuration for creating a new user profile.", "AliasAttributes": "Attributes supported as an alias for this user pool. Possible values: *phone_number* , *email* , or *preferred_username* .\n\n> This user pool property cannot be updated.", "AutoVerifiedAttributes": "The attributes to be auto-verified. Possible values: *email* , *phone_number* .", "DeletionProtection": "When active, `DeletionProtection` prevents accidental deletion of your user pool. Before you can delete a user pool that you have protected against deletion, you must deactivate this feature.\n\nWhen you try to delete a protected user pool in a `DeleteUserPool` API request, Amazon Cognito returns an `InvalidParameterException` error. To delete a protected user pool, send a new `DeleteUserPool` request after you deactivate deletion protection in an `UpdateUserPool` API request.", "DeviceConfiguration": "The device-remembering configuration for a user pool. 
A null value indicates that you have deactivated device remembering in your user pool.\n\n> When you provide a value for any `DeviceConfiguration` field, you activate the Amazon Cognito device-remembering feature.", "EmailConfiguration": "The email configuration of your user pool. The email configuration type sets your preferred sending method, AWS Region, and sender for messages from your user pool.", "EmailVerificationMessage": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_VerificationMessageTemplateType.html) .", @@ -6542,7 +6594,7 @@ "PreAuthentication": "A pre-authentication AWS Lambda trigger.", "PreSignUp": "A pre-registration AWS Lambda trigger.", "PreTokenGeneration": "The Amazon Resource Name (ARN) of the function that you want to assign to your Lambda trigger.\n\nSet this parameter for legacy purposes. If you also set an ARN in `PreTokenGenerationConfig` , its value must be identical to `PreTokenGeneration` . For new instances of pre token generation triggers, set the `LambdaArn` of `PreTokenGenerationConfig` .", - "PreTokenGenerationConfig": "", + "PreTokenGenerationConfig": "The detailed configuration of a pre token generation trigger. If you also set an ARN in `PreTokenGeneration` , its value must be identical to `PreTokenGenerationConfig` .", "UserMigration": "The user migration Lambda config type.", "VerifyAuthChallengeResponse": "Verifies the authentication challenge response." }, @@ -6562,8 +6614,8 @@ "PasswordPolicy": "The password policy." }, "AWS::Cognito::UserPool PreTokenGenerationConfig": { - "LambdaArn": "", - "LambdaVersion": "" + "LambdaArn": "The Amazon Resource Name (ARN) of the function that you want to assign to your Lambda trigger.\n\nThis parameter and the `PreTokenGeneration` property of `LambdaConfig` have the same value. For new instances of pre token generation triggers, set `LambdaArn` .", + "LambdaVersion": "The user pool trigger version of the request that Amazon Cognito sends to your Lambda function. Higher-numbered versions add fields that support new features." }, "AWS::Cognito::UserPool RecoveryOption": { "Name": "Specifies the recovery method for a user.", @@ -6658,7 +6710,7 @@ "AWS::Cognito::UserPoolIdentityProvider": { "AttributeMapping": "A mapping of IdP attributes to standard and custom user pool attributes.", "IdpIdentifiers": "A list of IdP identifiers.", - "ProviderDetails": "The IdP details. The following list describes the provider detail keys for each IdP type.\n\n- For Google and Login with Amazon:\n\n- client_id\n- client_secret\n- authorize_scopes\n- For Facebook:\n\n- client_id\n- client_secret\n- authorize_scopes\n- api_version\n- For Sign in with Apple:\n\n- client_id\n- team_id\n- key_id\n- private_key\n- authorize_scopes\n- For OpenID Connect (OIDC) providers:\n\n- client_id\n- client_secret\n- attributes_request_method\n- oidc_issuer\n- authorize_scopes\n- The following keys are only present if Amazon Cognito didn't discover them at the `oidc_issuer` URL.\n\n- authorize_url\n- token_url\n- attributes_url\n- jwks_uri\n- Amazon Cognito sets the value of the following keys automatically. They are read-only.\n\n- attributes_url_add_attributes\n- For SAML providers:\n\n- MetadataFile or MetadataURL\n- IDPSignout *optional*", + "ProviderDetails": "The scopes, URLs, and identifiers for your external identity provider. The following examples describe the provider detail keys for each IdP type. 
These values and their\nschema are subject to change. Social IdP `authorize_scopes` values must match\nthe values listed here.\n\n- **OpenID Connect (OIDC)** - Amazon Cognito accepts the following elements when it can't discover endpoint URLs from `oidc_issuer` : `attributes_url` , `authorize_url` , `jwks_uri` , `token_url` .\n\nCreate or update request: `\"ProviderDetails\": { \"attributes_request_method\": \"GET\", \"attributes_url\": \"https://auth.example.com/userInfo\", \"authorize_scopes\": \"openid profile email\", \"authorize_url\": \"https://auth.example.com/authorize\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"jwks_uri\": \"https://auth.example.com/.well-known/jwks.json\", \"oidc_issuer\": \"https://auth.example.com\", \"token_url\": \"https://example.com/token\" }`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_request_method\": \"GET\", \"attributes_url\": \"https://auth.example.com/userInfo\", \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"openid profile email\", \"authorize_url\": \"https://auth.example.com/authorize\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"jwks_uri\": \"https://auth.example.com/.well-known/jwks.json\", \"oidc_issuer\": \"https://auth.example.com\", \"token_url\": \"https://example.com/token\" }`\n- **SAML** - Create or update request with Metadata URL: `\"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"MetadataURL\": \"https://auth.example.com/sso/saml/metadata\", \"RequestSigningAlgorithm\": \"rsa-sha256\" }`\n\nCreate or update request with Metadata file: `\"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"MetadataFile\": \"[metadata XML]\", \"RequestSigningAlgorithm\": \"rsa-sha256\" }`\n\nThe value of `MetadataFile` must be the plaintext metadata document with all quote (\") characters escaped by backslashes.\n\nDescribe response: `\"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"ActiveEncryptionCertificate\": \"[certificate]\", \"MetadataURL\": \"https://auth.example.com/sso/saml/metadata\", \"RequestSigningAlgorithm\": \"rsa-sha256\", \"SLORedirectBindingURI\": \"https://auth.example.com/slo/saml\", \"SSORedirectBindingURI\": \"https://auth.example.com/sso/saml\" }`\n- **LoginWithAmazon** - Create or update request: `\"ProviderDetails\": { \"authorize_scopes\": \"profile postal_code\", \"client_id\": \"amzn1.application-oa2-client.1example23456789\", \"client_secret\": \"provider-app-client-secret\"`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_url\": \"https://api.amazon.com/user/profile\", \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"profile postal_code\", \"authorize_url\": \"https://www.amazon.com/ap/oa\", \"client_id\": \"amzn1.application-oa2-client.1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"token_request_method\": \"POST\", \"token_url\": \"https://api.amazon.com/auth/o2/token\" }`\n- **Google** - Create or update request: `\"ProviderDetails\": { \"authorize_scopes\": \"email profile openid\", \"client_id\": \"1example23456789.apps.googleusercontent.com\", \"client_secret\": \"provider-app-client-secret\" }`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_url\": \"https://people.googleapis.com/v1/people/me?personFields=\", \"attributes_url_add_attributes\": 
\"true\", \"authorize_scopes\": \"email profile openid\", \"authorize_url\": \"https://accounts.google.com/o/oauth2/v2/auth\", \"client_id\": \"1example23456789.apps.googleusercontent.com\", \"client_secret\": \"provider-app-client-secret\", \"oidc_issuer\": \"https://accounts.google.com\", \"token_request_method\": \"POST\", \"token_url\": \"https://www.googleapis.com/oauth2/v4/token\" }`\n- **SignInWithApple** - Create or update request: `\"ProviderDetails\": { \"authorize_scopes\": \"email name\", \"client_id\": \"com.example.cognito\", \"private_key\": \"1EXAMPLE\", \"key_id\": \"2EXAMPLE\", \"team_id\": \"3EXAMPLE\" }`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"email name\", \"authorize_url\": \"https://appleid.apple.com/auth/authorize\", \"client_id\": \"com.example.cognito\", \"key_id\": \"1EXAMPLE\", \"oidc_issuer\": \"https://appleid.apple.com\", \"team_id\": \"2EXAMPLE\", \"token_request_method\": \"POST\", \"token_url\": \"https://appleid.apple.com/auth/token\" }`\n- **Facebook** - Create or update request: `\"ProviderDetails\": { \"api_version\": \"v17.0\", \"authorize_scopes\": \"public_profile, email\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\" }`\n\nDescribe response: `\"ProviderDetails\": { \"api_version\": \"v17.0\", \"attributes_url\": \"https://graph.facebook.com/v17.0/me?fields=\", \"attributes_url_add_attributes\": \"true\", \"authorize_scopes\": \"public_profile, email\", \"authorize_url\": \"https://www.facebook.com/v17.0/dialog/oauth\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"token_request_method\": \"GET\", \"token_url\": \"https://graph.facebook.com/v17.0/oauth/access_token\" }`", "ProviderName": "The IdP name.", "ProviderType": "The IdP type.", "UserPoolId": "The user pool ID." @@ -6723,7 +6775,7 @@ "UserPoolId": "The user pool ID for the user pool." }, "AWS::Cognito::UserPoolUser": { - "ClientMetadata": "A map of custom key-value pairs that you can provide as input for the custom workflow that is invoked by the *pre sign-up* trigger.\n\nYou create custom workflows by assigning AWS Lambda functions to user pool triggers. When you create a `UserPoolUser` resource and include the `ClientMetadata` property, Amazon Cognito invokes the function that is assigned to the *pre sign-up* trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a `clientMetadata` attribute, which provides the data that you assigned to the ClientMetadata property. In your function code in AWS Lambda , you can process the `clientMetadata` value to enhance your workflow for your specific needs.\n\nFor more information, see [Customizing User Pool Workflows with Lambda Triggers](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) in the *Amazon Cognito Developer Guide* .\n\n> Take the following limitations into consideration when you use the ClientMetadata parameter:\n> \n> - Amazon Cognito does not store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. 
If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.\n> - Amazon Cognito does not validate the ClientMetadata value.\n> - Amazon Cognito does not encrypt the the ClientMetadata value, so don't use it to provide sensitive information.", + "ClientMetadata": "A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.\n\nYou create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the AdminCreateUser API action, Amazon Cognito invokes the function that is assigned to the *pre sign-up* trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a `clientMetadata` attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminCreateUser request. In your function code in AWS Lambda , you can process the `clientMetadata` value to enhance your workflow for your specific needs.\n\nFor more information, see [Customizing user pool Workflows with Lambda Triggers](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) in the *Amazon Cognito Developer Guide* .\n\n> When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following:\n> \n> - Store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose.\n> - Validate the ClientMetadata value.\n> - Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive information.", "DesiredDeliveryMediums": "Specify `\"EMAIL\"` if email will be used to send the welcome message. Specify `\"SMS\"` if the phone number will be used. The default value is `\"SMS\"` . You can specify more than one value.", "ForceAliasCreation": "This parameter is used only if the `phone_number_verified` or `email_verified` attribute is set to `True` . Otherwise, it is ignored.\n\nIf this parameter is set to `True` and the phone number or email address specified in the UserAttributes parameter already exists as an alias with a different user, the API call will migrate the alias from the previous user to the newly created user. The previous user will no longer be able to log in using that alias.\n\nIf this parameter is set to `False` , the API throws an `AliasExistsException` error if the alias already exists. The default value is `False` .", "MessageAction": "Set to `RESEND` to resend the invitation message to a user that already exists and reset the expiration limit on the user's account. Set to `SUPPRESS` to suppress sending the message. You can specify only one value.", @@ -9186,7 +9238,7 @@ "Schedule": "Specifies a schedule used to periodically transfer files from a source to a destination location. The schedule should be specified in UTC time. 
For more information, see [Scheduling your task](https://docs.aws.amazon.com/datasync/latest/userguide/task-scheduling.html) .", "SourceLocationArn": "The Amazon Resource Name (ARN) of the source location for the task.", "Tags": "Specifies the tags that you want to apply to the Amazon Resource Name (ARN) representing the task.\n\n*Tags* are key-value pairs that help you manage, filter, and search for your DataSync resources.", - "TaskReportConfig": "Specifies how you want to configure a task report, which provides detailed information about for your DataSync transfer." + "TaskReportConfig": "Specifies how you want to configure a task report, which provides detailed information about your DataSync transfer. For more information, see [Monitoring your DataSync transfers with task reports](https://docs.aws.amazon.com/datasync/latest/userguide/task-reports.html) .\n\nWhen using this parameter, your caller identity (the role that you're using DataSync with) must have the `iam:PassRole` permission. The [AWSDataSyncFullAccess](https://docs.aws.amazon.com/datasync/latest/userguide/security-iam-awsmanpol.html#security-iam-awsmanpol-awsdatasyncfullaccess) policy includes this permission." }, "AWS::DataSync::Task Deleted": { "ReportLevel": "Specifies whether you want your task report to include only what went wrong with your transfer or a list of what succeeded and didn't.\n\n- `ERRORS_ONLY` : A report shows what DataSync was unable to delete.\n- `SUCCESSES_AND_ERRORS` : A report shows what DataSync was able and unable to delete." @@ -9679,6 +9731,7 @@ "KeyType": "The role that this key attribute will assume:\n\n- `HASH` - partition key\n- `RANGE` - sort key\n\n> The partition key of an item is also known as its *hash attribute* . The term \"hash attribute\" derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values.\n> \n> The sort key of an item is also known as its *range attribute* . The term \"range attribute\" derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value." }, "AWS::DynamoDB::GlobalTable KinesisStreamSpecification": { + "ApproximateCreationDateTimePrecision": "The precision for the time and date that the stream was created.", "StreamArn": "The ARN for a specific Kinesis data stream." }, "AWS::DynamoDB::GlobalTable LocalSecondaryIndex": { @@ -9792,6 +9845,7 @@ "KeyType": "The role that this key attribute will assume:\n\n- `HASH` - partition key\n- `RANGE` - sort key\n\n> The partition key of an item is also known as its *hash attribute* . The term \"hash attribute\" derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values.\n> \n> The sort key of an item is also known as its *range attribute* . The term \"range attribute\" derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value." }, "AWS::DynamoDB::Table KinesisStreamSpecification": { + "ApproximateCreationDateTimePrecision": "The precision for the time and date that the stream was created.", "StreamArn": "The ARN for a specific Kinesis data stream.\n\nLength Constraints: Minimum length of 37. Maximum length of 1024." }, "AWS::DynamoDB::Table LocalSecondaryIndex": { @@ -9902,6 +9956,7 @@ "ClientCidrBlock": "The IPv4 address range, in CIDR notation, from which to assign client IP addresses. 
The address range cannot overlap with the local CIDR of the VPC in which the associated subnet is located, or the routes that you add manually. The address range cannot be changed after the Client VPN endpoint has been created. Client CIDR range must have a size of at least /22 and must not be greater than /12.", "ClientConnectOptions": "The options for managing connection authorization for new client connections.", "ClientLoginBannerOptions": "Options for enabling a customizable text banner that will be displayed on AWS provided clients when a VPN session is established.", + "ClientRouteMonitoringOptions": "", "ConnectionLogOptions": "Information about the client connection logging options.\n\nIf you enable client connection logging, data about client connections is sent to a Cloudwatch Logs log stream. The following information is logged:\n\n- Client connection requests\n- Client connection results (successful and unsuccessful)\n- Reasons for unsuccessful client connection requests\n- Client connection termination time", "Description": "A brief description of the Client VPN endpoint.", "DnsServers": "Information about the DNS servers to be used for DNS resolution. A Client VPN endpoint can have up to two DNS servers. If no DNS server is specified, the DNS address configured on the device is used for the DNS server.", @@ -9932,6 +9987,9 @@ "BannerText": "Customizable text that will be displayed in a banner on AWS provided clients when a VPN session is established. UTF-8 encoded characters only. Maximum of 1400 characters.", "Enabled": "Enable or disable a customizable text banner that will be displayed on AWS provided clients when a VPN session is established.\n\nValid values: `true | false`\n\nDefault value: `false`" }, + "AWS::EC2::ClientVpnEndpoint ClientRouteMonitoringOptions": { + "Enabled": "" + }, "AWS::EC2::ClientVpnEndpoint ConnectionLogOptions": { "CloudwatchLogGroup": "The name of the CloudWatch Logs log group. Required if connection logging is enabled.", "CloudwatchLogStream": "The name of the CloudWatch Logs log stream to which the connection data is published.", @@ -10056,9 +10114,9 @@ "MemoryMiB": "The minimum and maximum amount of memory, in MiB.", "NetworkBandwidthGbps": "The minimum and maximum amount of baseline network bandwidth, in gigabits per second (Gbps). For more information, see [Amazon EC2 instance network bandwidth](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-network-bandwidth.html) in the *Amazon EC2 User Guide* .\n\nDefault: No minimum or maximum limits", "NetworkInterfaceCount": "The minimum and maximum number of network interfaces.\n\nDefault: No minimum or maximum limits", - "OnDemandMaxPricePercentageOverLowestPrice": "The price protection threshold for On-Demand Instances. This is the maximum you\u2019ll pay for an On-Demand Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. 
When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> If you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. \n\nDefault: `20`", + "OnDemandMaxPricePercentageOverLowestPrice": "[Price protection] The price protection threshold for On-Demand Instances, as a percentage higher than an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo indicate no price protection threshold, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> If you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. \n\nDefault: `20`", "RequireHibernateSupport": "Indicates whether instance types must support hibernation for On-Demand Instances.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) .\n\nDefault: `false`", - "SpotMaxPricePercentageOverLowestPrice": "The price protection threshold for Spot Instance. This is the maximum you\u2019ll pay for an Spot Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> If you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. \n\nDefault: `100`", + "SpotMaxPricePercentageOverLowestPrice": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. 
The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo indicate no price protection threshold, specify a high value, such as `999999` .\n\nIf you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, then `SpotMaxPricePercentageOverLowestPrice` is used and the value for that parameter defaults to `100` . \n\nDefault: `100`", "TotalLocalStorageGB": "The minimum and maximum amount of total local storage, in GB.\n\nDefault: No minimum or maximum limits", "VCpuCount": "The minimum and maximum number of vCPUs." }, @@ -10373,7 +10431,7 @@ }, "AWS::EC2::Instance NetworkInterface": { "AssociateCarrierIpAddress": "Indicates whether to assign a carrier IP address to the network interface.\n\nYou can only assign a carrier IP address to a network interface that is in a subnet in a Wavelength Zone. For more information about carrier IP addresses, see [Carrier IP address](https://docs.aws.amazon.com/wavelength/latest/developerguide/how-wavelengths-work.html#provider-owned-ip) in the *AWS Wavelength Developer Guide* .", - "AssociatePublicIpAddress": "Indicates whether to assign a public IPv4 address to an instance. Applies only if creating a network interface when launching an instance. The network interface must be the primary network interface. If launching into a default subnet, the default value is `true` .\n\nStarting on February 1, 2024, AWS will charge for all public IPv4 addresses, including public IPv4 addresses associated with running instances and Elastic IP addresses. For more information, see the *Public IPv4 Address* tab on the [VPC pricing page](https://docs.aws.amazon.com/vpc/pricing/) .", + "AssociatePublicIpAddress": "Indicates whether to assign a public IPv4 address to an instance. Applies only if creating a network interface when launching an instance. The network interface must be the primary network interface. If launching into a default subnet, the default value is `true` .\n\nAWS charges for all public IPv4 addresses, including public IPv4 addresses associated with running instances and Elastic IP addresses. For more information, see the *Public IPv4 Address* tab on the [VPC pricing page](https://docs.aws.amazon.com/vpc/pricing/) .", "DeleteOnTermination": "Indicates whether the network interface is deleted when the instance is terminated. 
Applies only if creating a network interface when launching an instance.", "Description": "The description of the network interface. Applies only if creating a network interface when launching an instance.", "DeviceIndex": "The position of the network interface in the attachment order. A primary network interface has a device index of 0.\n\nIf you create a network interface when launching an instance, you must specify the device index.", @@ -10530,13 +10588,14 @@ "InstanceGenerations": "Indicates whether current or previous generation instance types are included. The current generation instance types are recommended for use. Current generation instance types are typically the latest two to three generations in each instance family. For more information, see [Instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) in the *Amazon EC2 User Guide* .\n\nFor current generation instance types, specify `current` .\n\nFor previous generation instance types, specify `previous` .\n\nDefault: Current and previous generation instance types", "LocalStorage": "Indicates whether instance types with instance store volumes are included, excluded, or required. For more information, see [Amazon EC2 instance store](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html) in the *Amazon EC2 User Guide* .\n\n- To include instance types with instance store volumes, specify `included` .\n- To require only instance types with instance store volumes, specify `required` .\n- To exclude instance types with instance store volumes, specify `excluded` .\n\nDefault: `included`", "LocalStorageTypes": "The type of local storage that is required.\n\n- For instance types with hard disk drive (HDD) storage, specify `hdd` .\n- For instance types with solid state drive (SSD) storage, specify `ssd` .\n\nDefault: `hdd` and `ssd`", + "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice": "[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo indicate no price protection threshold, specify a high value, such as `999999` .\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. 
If you don't specify either, then `SpotMaxPricePercentageOverLowestPrice` is used and the value for that parameter defaults to `100` .", "MemoryGiBPerVCpu": "The minimum and maximum amount of memory per vCPU, in GiB.\n\nDefault: No minimum or maximum limits", "MemoryMiB": "The minimum and maximum amount of memory, in MiB.", "NetworkBandwidthGbps": "The minimum and maximum amount of network bandwidth, in gigabits per second (Gbps).\n\nDefault: No minimum or maximum limits", "NetworkInterfaceCount": "The minimum and maximum number of network interfaces.\n\nDefault: No minimum or maximum limits", - "OnDemandMaxPricePercentageOverLowestPrice": "The price protection threshold for On-Demand Instances. This is the maximum you\u2019ll pay for an On-Demand Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> If you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. \n\nDefault: `20`", + "OnDemandMaxPricePercentageOverLowestPrice": "[Price protection] The price protection threshold for On-Demand Instances, as a percentage higher than an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> If you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. \n\nDefault: `20`", "RequireHibernateSupport": "Indicates whether instance types must support hibernation for On-Demand Instances.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) .\n\nDefault: `false`", - "SpotMaxPricePercentageOverLowestPrice": "The price protection threshold for Spot Instances. This is the maximum you\u2019ll pay for a Spot Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. 
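Because only one of the two Spot thresholds may be set, a template that opts into the newer On-Demand-anchored threshold would look roughly like this sketch (values are hypothetical):

```yaml
# Hypothetical fragment. Setting this attribute means
# SpotMaxPricePercentageOverLowestPrice must be left unset.
InstanceRequirements:
  VCpuCount:
    Min: 2
  MemoryMiB:
    Min: 4096
  # Cap Spot choices at 50% of the identified On-Demand price (example value).
  MaxSpotPriceAsPercentageOfOptimalOnDemandPrice: 50
```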
When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> If you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. \n\nDefault: `100`", + "SpotMaxPricePercentageOverLowestPrice": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo indicate no price protection threshold, specify a high value, such as `999999` .\n\nIf you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, then `SpotMaxPricePercentageOverLowestPrice` is used and the value for that parameter defaults to `100` . \n\nDefault: `100`", "TotalLocalStorageGB": "The minimum and maximum amount of total local storage, in GB.\n\nDefault: No minimum or maximum limits", "VCpuCount": "The minimum and maximum number of vCPUs." }, @@ -10621,7 +10680,7 @@ }, "AWS::EC2::LaunchTemplate NetworkInterface": { "AssociateCarrierIpAddress": "Associates a Carrier IP address with eth0 for a new network interface.\n\nUse this option when you launch an instance in a Wavelength Zone and want to associate a Carrier IP address with the network interface. For more information about Carrier IP addresses, see [Carrier IP addresses](https://docs.aws.amazon.com/wavelength/latest/developerguide/how-wavelengths-work.html#provider-owned-ip) in the *AWS Wavelength Developer Guide* .", - "AssociatePublicIpAddress": "Associates a public IPv4 address with eth0 for a new network interface.\n\nStarting on February 1, 2024, AWS will charge for all public IPv4 addresses, including public IPv4 addresses associated with running instances and Elastic IP addresses. 
For more information, see the *Public IPv4 Address* tab on the [Amazon VPC pricing page](https://docs.aws.amazon.com/vpc/pricing/) .", + "AssociatePublicIpAddress": "Associates a public IPv4 address with eth0 for a new network interface.\n\nAWS charges for all public IPv4 addresses, including public IPv4 addresses associated with running instances and Elastic IP addresses. For more information, see the *Public IPv4 Address* tab on the [Amazon VPC pricing page](https://docs.aws.amazon.com/vpc/pricing/) .", "ConnectionTrackingSpecification": "A connection tracking specification for the network interface.", "DeleteOnTermination": "Indicates whether the network interface is deleted when the instance is terminated.", "Description": "A description for the network interface.", @@ -11207,7 +11266,7 @@ "Ipv6Address": "The IPv6 address." }, "AWS::EC2::SpotFleet InstanceNetworkInterfaceSpecification": { - "AssociatePublicIpAddress": "Indicates whether to assign a public IPv4 address to an instance you launch in a VPC. The public IP address can only be assigned to a network interface for eth0, and can only be assigned to a new network interface, not an existing one. You cannot specify more than one network interface in the request. If launching into a default subnet, the default value is `true` .\n\nStarting on February 1, 2024, AWS will charge for all public IPv4 addresses, including public IPv4 addresses associated with running instances and Elastic IP addresses. For more information, see the *Public IPv4 Address* tab on the [Amazon VPC pricing page](https://docs.aws.amazon.com/vpc/pricing/) .", + "AssociatePublicIpAddress": "Indicates whether to assign a public IPv4 address to an instance you launch in a VPC. The public IP address can only be assigned to a network interface for eth0, and can only be assigned to a new network interface, not an existing one. You cannot specify more than one network interface in the request. If launching into a default subnet, the default value is `true` .\n\nAWS charges for all public IPv4 addresses, including public IPv4 addresses associated with running instances and Elastic IP addresses. For more information, see the *Public IPv4 Address* tab on the [Amazon VPC pricing page](https://docs.aws.amazon.com/vpc/pricing/) .", "DeleteOnTermination": "Indicates whether the network interface is deleted when the instance is terminated.", "Description": "The description of the network interface. Applies only if creating a network interface when launching an instance.", "DeviceIndex": "The position of the network interface in the attachment order. A primary network interface has a device index of 0.\n\nIf you specify a network interface when launching an instance, you must specify the device index.", @@ -11238,9 +11297,9 @@ "MemoryMiB": "The minimum and maximum amount of memory, in MiB.", "NetworkBandwidthGbps": "The minimum and maximum amount of baseline network bandwidth, in gigabits per second (Gbps). For more information, see [Amazon EC2 instance network bandwidth](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-network-bandwidth.html) in the *Amazon EC2 User Guide* .\n\nDefault: No minimum or maximum limits", "NetworkInterfaceCount": "The minimum and maximum number of network interfaces.\n\nDefault: No minimum or maximum limits", - "OnDemandMaxPricePercentageOverLowestPrice": "The price protection threshold for On-Demand Instances. 
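A minimal sketch of the `AssociatePublicIpAddress` setting described above, inside a launch template's network interface; the subnet ID is a placeholder:

```yaml
# Hypothetical fragment; the subnet ID is a placeholder.
LaunchTemplate:
  Type: AWS::EC2::LaunchTemplate
  Properties:
    LaunchTemplateData:
      NetworkInterfaces:
        - DeviceIndex: 0                 # eth0
          SubnetId: subnet-0abc1234      # placeholder
          AssociatePublicIpAddress: true # billed public IPv4
          DeleteOnTermination: true
```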
This is the maximum you\u2019ll pay for an On-Demand Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> If you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. \n\nDefault: `20`", + "OnDemandMaxPricePercentageOverLowestPrice": "[Price protection] The price protection threshold for On-Demand Instances, as a percentage higher than an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo indicate no price protection threshold, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> If you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. \n\nDefault: `20`", "RequireHibernateSupport": "Indicates whether instance types must support hibernation for On-Demand Instances.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) .\n\nDefault: `false`", - "SpotMaxPricePercentageOverLowestPrice": "The price protection threshold for Spot Instance. This is the maximum you\u2019ll pay for an Spot Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> If you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. 
\n\nDefault: `100`", + "SpotMaxPricePercentageOverLowestPrice": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo indicate no price protection threshold, specify a high value, such as `999999` .\n\nIf you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, then `SpotMaxPricePercentageOverLowestPrice` is used and the value for that parameter defaults to `100` . \n\nDefault: `100`", "TotalLocalStorageGB": "The minimum and maximum amount of total local storage, in GB.\n\nDefault: No minimum or maximum limits", "VCpuCount": "The minimum and maximum number of vCPUs." }, @@ -11376,7 +11435,7 @@ "Ipv6IpamPoolId": "An IPv6 IPAM pool ID for the subnet.", "Ipv6Native": "Indicates whether this is an IPv6 only subnet. For more information, see [Subnet basics](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html#subnet-basics) in the *Amazon Virtual Private Cloud User Guide* .", "Ipv6NetmaskLength": "An IPv6 netmask length for the subnet.", - "MapPublicIpOnLaunch": "Indicates whether instances launched in this subnet receive a public IPv4 address. The default value is `false` .\n\nStarting on February 1, 2024, AWS will charge for all public IPv4 addresses, including public IPv4 addresses associated with running instances and Elastic IP addresses. For more information, see the *Public IPv4 Address* tab on the [VPC pricing page](https://docs.aws.amazon.com/vpc/pricing/) .", + "MapPublicIpOnLaunch": "Indicates whether instances launched in this subnet receive a public IPv4 address. The default value is `false` .\n\nAWS charges for all public IPv4 addresses, including public IPv4 addresses associated with running instances and Elastic IP addresses. For more information, see the *Public IPv4 Address* tab on the [VPC pricing page](https://docs.aws.amazon.com/vpc/pricing/) .", "OutpostArn": "The Amazon Resource Name (ARN) of the Outpost.", "PrivateDnsNameOptionsOnLaunch": "The hostname type for EC2 instances launched into this subnet and how DNS A and AAAA record queries to the instances should be handled. 
For more information, see [Amazon EC2 instance hostname types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-naming.html) in the *Amazon Elastic Compute Cloud User Guide* .\n\nAvailable options:\n\n- EnableResourceNameDnsAAAARecord (true | false)\n- EnableResourceNameDnsARecord (true | false)\n- HostnameType (ip-name | resource-name)", "Tags": "Any tags assigned to the subnet.", @@ -12043,7 +12102,7 @@ "Tags": "The tags applied to this Amazon EBS volume. `AmazonECSCreated` and `AmazonECSManaged` are reserved tags that can't be used." }, "AWS::ECS::Service LoadBalancer": { - "ContainerName": "The name of the container (as it appears in a container definition) to associate with the load balancer.", + "ContainerName": "The name of the container (as it appears in a container definition) to associate with the load balancer.\n\nYou need to specify the container name when configuring the target group for an Amazon ECS load balancer.", "ContainerPort": "The port on the container to associate with the load balancer. This port must correspond to a `containerPort` in the task definition the tasks in the service are using. For tasks that use the EC2 launch type, the container instance they're launched on must allow ingress traffic on the `hostPort` of the port mapping.", "LoadBalancerName": "The name of the load balancer to associate with the Amazon ECS service or task set.\n\nIf you are using an Application Load Balancer or a Network Load Balancer the load balancer name parameter should be omitted.", "TargetGroupArn": "The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or task set.\n\nA target group ARN is only specified when using an Application Load Balancer or Network Load Balancer.\n\nFor services using the `ECS` deployment controller, you can specify one or multiple target groups. For more information, see [Registering multiple target groups with a service](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/register-multiple-targetgroups.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nFor services using the `CODE_DEPLOY` deployment controller, you're required to define two target groups for the load balancer. For more information, see [Blue/green deployment with CodeDeploy](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-type-bluegreen.html) in the *Amazon Elastic Container Service Developer Guide* .\n\n> If your service's task definition uses the `awsvpc` network mode, you must choose `ip` as the target type, not `instance` . Do this when creating your target groups because tasks that use the `awsvpc` network mode are associated with an elastic network interface, not an Amazon EC2 instance. This network mode is required for the Fargate launch type." @@ -12150,6 +12209,7 @@ "AWS::ECS::TaskDefinition ContainerDefinition": { "Command": "The command that's passed to the container. This parameter maps to `Cmd` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `COMMAND` parameter to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . For more information, see [https://docs.docker.com/engine/reference/builder/#cmd](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/builder/#cmd) . 
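To show where the `ContainerName` requirement from the `AWS::ECS::Service LoadBalancer` description above fits, a rough sketch with placeholder logical IDs and names:

```yaml
# Hypothetical fragment; logical IDs and the container name are placeholders.
WebService:
  Type: AWS::ECS::Service
  Properties:
    TaskDefinition: !Ref WebTaskDef
    LoadBalancers:
      - ContainerName: web               # must match a container definition
        ContainerPort: 80
        TargetGroupArn: !Ref WebTargetGroup
```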
If there are multiple arguments, each argument is a separate string in the array.", "Cpu": "The number of `cpu` units reserved for the container. This parameter maps to `CpuShares` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--cpu-shares` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nThis field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level `cpu` value.\n\n> You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the [Amazon EC2 Instances](https://docs.aws.amazon.com/ec2/instance-types/) detail page by 1,024. \n\nLinux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that's the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task is guaranteed a minimum of 512 CPU units when needed. Moreover, each container could float to higher CPU usage if the other container was not using it. If both tasks were 100% active all of the time, they would be limited to 512 CPU units.\n\nOn Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. For more information, see [CPU share constraint](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#cpu-share-constraint) in the Docker documentation. The minimum valid CPU share value that the Linux kernel allows is 2. However, the CPU parameter isn't required, and you can use CPU values below 2 in your container definitions. For CPU values below 2 (including null), the behavior varies based on your Amazon ECS container agent version:\n\n- *Agent versions less than or equal to 1.1.0:* Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.\n- *Agent versions greater than or equal to 1.2.0:* Null, zero, and CPU values of 1 are passed to Docker as 2.\n\nOn Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that's described in the task definition. A null or zero CPU value is passed to Docker as `0` , which Windows interprets as 1% of one CPU.", + "CredentialSpecs": "A list of ARNs in SSM or Amazon S3 to a credential spec ( `CredSpec` ) file that configures the container for Active Directory authentication. We recommend that you use this parameter instead of the `dockerSecurityOptions` . The maximum number of ARNs is 1.\n\nThere are two formats for each ARN.\n\n- **credentialspecdomainless:MyARN** - You use `credentialspecdomainless:MyARN` to provide a `CredSpec` with an additional section for a secret in AWS Secrets Manager . 
You provide the login credentials to the domain in the secret.\n\nEach task that runs on any container instance can join different domains.\n\nYou can use this format without joining the container instance to a domain.\n- **credentialspec:MyARN** - You use `credentialspec:MyARN` to provide a `CredSpec` for a single domain.\n\nYou must join the container instance to the domain before you start any tasks that use this task definition.\n\nIn both formats, replace `MyARN` with the ARN in SSM or Amazon S3.\n\nIf you provide a `credentialspecdomainless:MyARN` , the `credspec` must provide an ARN in AWS Secrets Manager for a secret containing the username, password, and the domain to connect to. For better security, the instance isn't joined to the domain for domainless authentication. Other applications on the instance can't use the domainless credentials. You can use this parameter to run tasks on the same instance, even if the tasks need to join different domains. For more information, see [Using gMSAs for Windows Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows-gmsa.html) and [Using gMSAs for Linux Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/linux-gmsa.html) .", "DependsOn": "The dependencies defined for container startup and shutdown. A container can contain multiple dependencies. When a dependency is defined for container startup, for container shutdown it is reversed.\n\nFor tasks using the EC2 launch type, the container instances require at least version 1.26.0 of the container agent to turn on container dependencies. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see [Updating the Amazon ECS Container Agent](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html) in the *Amazon Elastic Container Service Developer Guide* . If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the `ecs-init` package. If your container instances are launched from version `20190301` or later, then they contain the required versions of the container agent and `ecs-init` . For more information, see [Amazon ECS-optimized Linux AMI](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nFor tasks using the Fargate launch type, the task or service requires the following platforms:\n\n- Linux platform version `1.3.0` or later.\n- Windows platform version `1.0.0` or later.\n\nIf the task definition is used in a blue/green deployment that uses [AWS::CodeDeploy::DeploymentGroup BlueGreenDeploymentConfiguration](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codedeploy-deploymentgroup-bluegreendeploymentconfiguration.html) , the `dependsOn` parameter is not supported. For more information, see [Issue #680](https://docs.aws.amazon.com/https://github.com/aws-cloudformation/cloudformation-coverage-roadmap/issues/680) on the GitHub website.", "DisableNetworking": "When this parameter is true, networking is off within the container. 
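A sketch of the domainless `CredentialSpecs` format described above; the container name, image, and S3 ARN are placeholders:

```yaml
# Hypothetical fragment; the image and S3 ARN are placeholders, and at
# most one CredentialSpecs entry is allowed.
ContainerDefinitions:
  - Name: windows-app
    Image: myregistry/windows-app:latest
    CredentialSpecs:
      - credentialspecdomainless:arn:aws:s3:::example-bucket/credspec.json
```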
This parameter maps to `NetworkDisabled` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) .\n\n> This parameter is not supported for Windows containers.", "DnsSearchDomains": "A list of DNS search domains that are presented to the container. This parameter maps to `DnsSearch` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--dns-search` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers.", @@ -12182,7 +12242,7 @@ "Secrets": "The secrets to pass to the container. For more information, see [Specifying Sensitive Data](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html) in the *Amazon Elastic Container Service Developer Guide* .", "StartTimeout": "Time duration (in seconds) to wait before giving up on resolving dependencies for a container. For example, you specify two containers in a task definition with containerA having a dependency on containerB reaching a `COMPLETE` , `SUCCESS` , or `HEALTHY` status. If a `startTimeout` value is specified for containerB and it doesn't reach the desired status within that time, then containerA gives up and doesn't start. This results in the task transitioning to a `STOPPED` state.\n\n> When the `ECS_CONTAINER_START_TIMEOUT` container agent configuration variable is used, it's enforced independently from this start timeout value. \n\nFor tasks using the Fargate launch type, the task or service requires the following platforms:\n\n- Linux platform version `1.3.0` or later.\n- Windows platform version `1.0.0` or later.\n\nFor tasks using the EC2 launch type, your container instances require at least version `1.26.0` of the container agent to use a container start timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see [Updating the Amazon ECS Container Agent](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html) in the *Amazon Elastic Container Service Developer Guide* . If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version `1.26.0-1` of the `ecs-init` package. If your container instances are launched from version `20190301` or later, then they contain the required versions of the container agent and `ecs-init` . 
For more information, see [Amazon ECS-optimized Linux AMI](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nThe valid values are 2-120 seconds.", "StopTimeout": "Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own.\n\nFor tasks using the Fargate launch type, the task or service requires the following platforms:\n\n- Linux platform version `1.3.0` or later.\n- Windows platform version `1.0.0` or later.\n\nThe max stop timeout value is 120 seconds, and if the parameter is not specified, the default value of 30 seconds is used.\n\nFor tasks that use the EC2 launch type, if the `stopTimeout` parameter isn't specified, the value set for the Amazon ECS container agent configuration variable `ECS_CONTAINER_STOP_TIMEOUT` is used. If neither the `stopTimeout` parameter nor the `ECS_CONTAINER_STOP_TIMEOUT` agent configuration variable is set, then the default values of 30 seconds for Linux containers and 30 seconds on Windows containers are used. Your container instances require at least version 1.26.0 of the container agent to use a container stop timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see [Updating the Amazon ECS Container Agent](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html) in the *Amazon Elastic Container Service Developer Guide* . If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the `ecs-init` package. If your container instances are launched from version `20190301` or later, then they contain the required versions of the container agent and `ecs-init` . For more information, see [Amazon ECS-optimized Linux AMI](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nThe valid values are 2-120 seconds.", - "SystemControls": "A list of namespaced kernel parameters to set in the container. This parameter maps to `Sysctls` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--sysctl` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . For example, you can configure `net.ipv4.tcp_keepalive_time` setting to maintain longer lived connections.\n\nWe don't recommend that you specify network-related `systemControls` parameters for multiple containers in a single task that also uses either the `awsvpc` or `host` network mode. Doing this has the following disadvantages:\n\n- For tasks that use the `awsvpc` network mode including Fargate, if you set `systemControls` for any container, it applies to all containers in the task. If you set different `systemControls` for multiple containers in a single task, the container that's started last determines which `systemControls` take effect.\n- For tasks that use the `host` network mode, the network namespace `systemControls` aren't supported.\n\nIf you're setting an IPC resource namespace to use for the containers in the task, the following conditions apply to your system controls. 
For more information, see [IPC mode](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#task_definition_ipcmode) .\n\n- For tasks that use the `host` IPC mode, IPC namespace `systemControls` aren't supported.\n- For tasks that use the `task` IPC mode, IPC namespace `systemControls` values apply to all containers within a task.\n\n> This parameter is not supported for Windows containers. > This parameter is only supported for tasks that are hosted on AWS Fargate if the tasks are using platform version `1.4.0` or later (Linux). This isn't supported for Windows containers on Fargate.", + "SystemControls": "A list of namespaced kernel parameters to set in the container. This parameter maps to `Sysctls` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--sysctl` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . For example, you can configure `net.ipv4.tcp_keepalive_time` setting to maintain longer lived connections.", "Ulimits": "A list of `ulimits` to set in the container. This parameter maps to `Ulimits` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--ulimit` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . Valid naming values are displayed in the [Ulimit](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_Ulimit.html) data type. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`\n\n> This parameter is not supported for Windows containers.", "User": "The user to use inside the container. This parameter maps to `User` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--user` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> When running tasks using the `host` network mode, don't run containers using the root user (UID 0). We recommend using a non-root user for better security. \n\nYou can specify the `user` using the following formats. If specifying a UID or GID, you must specify it as a positive integer.\n\n- `user`\n- `user:group`\n- `uid`\n- `uid:gid`\n- `user:gid`\n- `uid:group`\n\n> This parameter is not supported for Windows containers.", "VolumesFrom": "Data volumes to mount from another container. 
This parameter maps to `VolumesFrom` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--volumes-from` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", @@ -12346,7 +12406,7 @@ "Subnets": "The IDs of the subnets associated with the task or service. There's a limit of 16 subnets that can be specified per `AwsVpcConfiguration` .\n\n> All specified subnets must be from the same VPC." }, "AWS::ECS::TaskSet LoadBalancer": { - "ContainerName": "The name of the container (as it appears in a container definition) to associate with the load balancer.", + "ContainerName": "The name of the container (as it appears in a container definition) to associate with the load balancer.\n\nYou need to specify the container name when configuring the target group for an Amazon ECS load balancer.", "ContainerPort": "The port on the container to associate with the load balancer. This port must correspond to a `containerPort` in the task definition the tasks in the service are using. For tasks that use the EC2 launch type, the container instance they're launched on must allow ingress traffic on the `hostPort` of the port mapping.", "TargetGroupArn": "The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or task set.\n\nA target group ARN is only specified when using an Application Load Balancer or Network Load Balancer.\n\nFor services using the `ECS` deployment controller, you can specify one or multiple target groups. For more information, see [Registering multiple target groups with a service](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/register-multiple-targetgroups.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nFor services using the `CODE_DEPLOY` deployment controller, you're required to define two target groups for the load balancer. For more information, see [Blue/green deployment with CodeDeploy](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-type-bluegreen.html) in the *Amazon Elastic Container Service Developer Guide* .\n\n> If your service's task definition uses the `awsvpc` network mode, you must choose `ip` as the target type, not `instance` . Do this when creating your target groups because tasks that use the `awsvpc` network mode are associated with an elastic network interface, not an Amazon EC2 instance. This network mode is required for the Fargate launch type." }, @@ -12398,7 +12458,7 @@ "FileSystemTags": "Use to create one or more tags associated with the file system. Each tag is a user-defined key-value pair. Name your file system on creation by including a `\"Key\":\"Name\",\"Value\":\"{value}\"` key-value pair. Each key must be unique. For more information, see [Tagging AWS resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the *AWS General Reference Guide* .", "KmsKeyId": "The ID of the AWS KMS key to be used to protect the encrypted file system. This parameter is only required if you want to use a nondefault KMS key . If this parameter is not specified, the default KMS key for Amazon EFS is used. 
This ID can be in one of the following formats:\n\n- Key ID - A unique identifier of the key, for example `1234abcd-12ab-34cd-56ef-1234567890ab` .\n- ARN - An Amazon Resource Name (ARN) for the key, for example `arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` .\n- Key alias - A previously created display name for a key, for example `alias/projectKey1` .\n- Key alias ARN - An ARN for a key alias, for example `arn:aws:kms:us-west-2:444455556666:alias/projectKey1` .\n\nIf `KmsKeyId` is specified, the `Encrypted` parameter must be set to true.", "LifecyclePolicies": "An array of `LifecyclePolicy` objects that define the file system's `LifecycleConfiguration` object. A `LifecycleConfiguration` object informs Lifecycle management of the following:\n\n- When to move files in the file system from primary storage to IA storage.\n- When to move files in the file system from primary storage or IA storage to Archive storage.\n- When to move files that are in IA or Archive storage to primary storage.\n\n> Amazon EFS requires that each `LifecyclePolicy` object have only a single transition. This means that in a request body, `LifecyclePolicies` needs to be structured as an array of `LifecyclePolicy` objects, one object for each transition: `TransitionToIA` , `TransitionToArchive` , and `TransitionToPrimaryStorageClass` . See the example requests in the following section for more information.", - "PerformanceMode": "The Performance mode of the file system. We recommend `generalPurpose` performance mode for all file systems. File systems using the `maxIO` performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. The performance mode can't be changed after the file system has been created. The `maxIO` mode is not supported on One Zone file systems.\n\n> Due to the higher per-operation latencies with Max I/O, we recommend using General Purpose performance mode for all file systems. \n\nDefault is `generalPurpose` .", + "PerformanceMode": "The performance mode of the file system. We recommend `generalPurpose` performance mode for all file systems. File systems using the `maxIO` performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. The performance mode can't be changed after the file system has been created. The `maxIO` mode is not supported on One Zone file systems.\n\n> Due to the higher per-operation latencies with Max I/O, we recommend using General Purpose performance mode for all file systems. \n\nDefault is `generalPurpose` .", "ProvisionedThroughputInMibps": "The throughput, measured in mebibytes per second (MiBps), that you want to provision for a file system that you're creating. Required if `ThroughputMode` is set to `provisioned` . Valid values are 1-3414 MiBps, with the upper limit depending on Region. To increase this limit, contact AWS Support . For more information, see [Amazon EFS quotas that you can increase](https://docs.aws.amazon.com/efs/latest/ug/limits.html#soft-limits) in the *Amazon EFS User Guide* .", "ReplicationConfiguration": "Describes the replication configuration for a specific file system.", "ThroughputMode": "Specifies the throughput mode for the file system. The mode can be `bursting` , `provisioned` , or `elastic` . If you set `ThroughputMode` to `provisioned` , you must also set a value for `ProvisionedThroughputInMibps` . 
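Pulling the EFS properties above together, a hedged sketch using the key-alias format from the list and example throughput values:

```yaml
# Hypothetical fragment; throughput values are examples only.
FileSystem:
  Type: AWS::EFS::FileSystem
  Properties:
    Encrypted: true                 # required when KmsKeyId is set
    KmsKeyId: alias/projectKey1     # key alias format from the list above
    PerformanceMode: generalPurpose
    ThroughputMode: provisioned     # requires ProvisionedThroughputInMibps
    ProvisionedThroughputInMibps: 128
```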
After you create the file system, you can decrease your file system's Provisioned throughput or change between the throughput modes, with certain time restrictions. For more information, see [Specifying throughput with provisioned mode](https://docs.aws.amazon.com/efs/latest/ug/performance.html#provisioned-throughput) in the *Amazon EFS User Guide* .\n\nDefault is `bursting` ." @@ -13686,8 +13746,8 @@ "Name": "The name of the load balancer. This name must be unique per region per account, can have a maximum of 32 characters, must contain only alphanumeric characters or hyphens, must not begin or end with a hyphen, and must not begin with \"internal-\".\n\nIf you don't specify a name, AWS CloudFormation generates a unique physical ID for the load balancer. If you specify a name, you cannot perform updates that require replacement of this resource, but you can perform other updates. To replace the resource, specify a new name.", "Scheme": "The nodes of an Internet-facing load balancer have public IP addresses. The DNS name of an Internet-facing load balancer is publicly resolvable to the public IP addresses of the nodes. Therefore, Internet-facing load balancers can route requests from clients over the internet.\n\nThe nodes of an internal load balancer have only private IP addresses. The DNS name of an internal load balancer is publicly resolvable to the private IP addresses of the nodes. Therefore, internal load balancers can route requests only from clients with access to the VPC for the load balancer.\n\nThe default is an Internet-facing load balancer.\n\nYou cannot specify a scheme for a Gateway Load Balancer.", "SecurityGroups": "[Application Load Balancers and Network Load Balancers] The IDs of the security groups for the load balancer.", - "SubnetMappings": "The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both.\n\n[Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets.\n\n[Application Load Balancers on Outposts] You must specify one Outpost subnet.\n\n[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.\n\n[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancer, you can specify one IPv6 address per subnet.\n\n[Gateway Load Balancers] You can specify subnets from one or more Availability Zones. You cannot specify Elastic IP addresses for your subnets.", - "Subnets": "The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both. 
To specify an Elastic IP address, specify subnet mappings instead of subnets.\n\n[Application Load Balancers] You must specify subnets from at least two Availability Zones.\n\n[Application Load Balancers on Outposts] You must specify one Outpost subnet.\n\n[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.\n\n[Network Load Balancers] You can specify subnets from one or more Availability Zones.\n\n[Gateway Load Balancers] You can specify subnets from one or more Availability Zones.", + "SubnetMappings": "The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both.\n\n[Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets.\n\n[Application Load Balancers on Outposts] You must specify one Outpost subnet.\n\n[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.\n\n[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancers, you can specify one IPv6 address per subnet.\n\n[Gateway Load Balancers] You can specify subnets from one or more Availability Zones. You cannot specify Elastic IP addresses for your subnets.", + "Subnets": "The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both. To specify an Elastic IP address, specify subnet mappings instead of subnets.\n\n[Application Load Balancers] You must specify subnets from at least two Availability Zones.\n\n[Application Load Balancers on Outposts] You must specify one Outpost subnet.\n\n[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.\n\n[Network Load Balancers] You can specify subnets from one or more Availability Zones.\n\n[Gateway Load Balancers] You can specify subnets from one or more Availability Zones.", "Tags": "The tags to assign to the load balancer.", "Type": "The type of load balancer. The default is `application` ." }, @@ -13740,7 +13800,7 @@ "Port": "The port on which the target is listening. If the target group protocol is GENEVE, the supported port is 6081. If the target type is `alb` , the targeted Application Load Balancer must have at least one listener whose port matches the target group port. This parameter is not used if the target is a Lambda function." }, "AWS::ElasticLoadBalancingV2::TargetGroup TargetGroupAttribute": { - "Key": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deregistration_delay.timeout_seconds` - The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from `draining` to `unused` . The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.\n- `stickiness.enabled` - Indicates whether target stickiness is enabled. The value is `true` or `false` . The default is `false` .\n- `stickiness.type` - Indicates the type of stickiness. 
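A sketch of the `SubnetMappings`-with-Elastic-IP pattern described above for an internet-facing Network Load Balancer; all IDs are placeholders:

```yaml
# Hypothetical fragment; subnet and allocation IDs are placeholders.
NetworkLB:
  Type: AWS::ElasticLoadBalancingV2::LoadBalancer
  Properties:
    Type: network
    Scheme: internet-facing
    SubnetMappings:
      - SubnetId: subnet-0aaa1111
        AllocationId: eipalloc-0bbb2222  # one Elastic IP per subnet
      - SubnetId: subnet-0ccc3333
        AllocationId: eipalloc-0ddd4444
```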
The possible values are:\n\n- `lb_cookie` and `app_cookie` for Application Load Balancers.\n- `source_ip` for Network Load Balancers.\n- `source_ip_dest_ip` and `source_ip_dest_ip_proto` for Gateway Load Balancers.\n\nThe following attributes are supported by Application Load Balancers and Network Load Balancers:\n\n- `load_balancing.cross_zone.enabled` - Indicates whether cross zone load balancing is enabled. The value is `true` , `false` or `use_load_balancer_configuration` . The default is `use_load_balancer_configuration` .\n- `target_group_health.dns_failover.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to the maximum number of targets. The default is `off` .\n- `target_group_health.dns_failover.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are 1 to the maximum number of targets. The default is 1.\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n\nThe following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:\n\n- `load_balancing.algorithm.type` - The load balancing algorithm determines how the load balancer selects targets when routing requests. The value is `round_robin` , `least_outstanding_requests` , or `weighted_random` . The default is `round_robin` .\n- `load_balancing.algorithm.anomaly_mitigation` - Only available when `load_balancing.algorithm.type` is `weighted_random` . Indicates whether anomaly mitigation is enabled. The value is `on` or `off` . The default is `off` .\n- `slow_start.duration_seconds` - The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). The default is 0 seconds (disabled).\n- `stickiness.app_cookie.cookie_name` - Indicates the name of the application-based cookie. Names that start with the following prefixes are not allowed: `AWSALB` , `AWSALBAPP` , and `AWSALBTG` ; they're reserved for use by the load balancer.\n- `stickiness.app_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the application-based cookie is considered stale. The range is 1 second to 1 week (604800 seconds). 
The default value is 1 day (86400 seconds).\n- `stickiness.lb_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n\nThe following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:\n\n- `lambda.multi_value_headers.enabled` - Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is `true` or `false` . The default is `false` . If the value is `false` and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.\n\nThe following attributes are supported only by Network Load Balancers:\n\n- `deregistration_delay.connection_termination.enabled` - Indicates whether the load balancer terminates connections at the end of the deregistration timeout. The value is `true` or `false` . For new UDP/TCP_UDP target groups the default is `true` . Otherwise, the default is `false` .\n- `preserve_client_ip.enabled` - Indicates whether client IP preservation is enabled. The value is `true` or `false` . The default is disabled if the target group type is IP address and the target group protocol is TCP or TLS. Otherwise, the default is enabled. Client IP preservation cannot be disabled for UDP and TCP_UDP target groups.\n- `proxy_protocol_v2.enabled` - Indicates whether Proxy Protocol version 2 is enabled. The value is `true` or `false` . The default is `false` .\n- `target_health_state.unhealthy.connection_termination.enabled` - Indicates whether the load balancer terminates connections to unhealthy targets. The value is `true` or `false` . The default is `true` .\n\nThe following attributes are supported only by Gateway Load Balancers:\n\n- `target_failover.on_deregistration` - Indicates how the Gateway Load Balancer handles existing flows when a target is deregistered. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) can't be set independently. The value you set for both attributes must be the same.\n- `target_failover.on_unhealthy` - Indicates how the Gateway Load Balancer handles existing flows when a target is unhealthy. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) cannot be set independently. The value you set for both attributes must be the same.", + "Key": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deregistration_delay.timeout_seconds` - The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from `draining` to `unused` . The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.\n- `stickiness.enabled` - Indicates whether target stickiness is enabled. The value is `true` or `false` . The default is `false` .\n- `stickiness.type` - Indicates the type of stickiness. 
The possible values are:\n\n- `lb_cookie` and `app_cookie` for Application Load Balancers.\n- `source_ip` for Network Load Balancers.\n- `source_ip_dest_ip` and `source_ip_dest_ip_proto` for Gateway Load Balancers.\n\nThe following attributes are supported by Application Load Balancers and Network Load Balancers:\n\n- `load_balancing.cross_zone.enabled` - Indicates whether cross zone load balancing is enabled. The value is `true` , `false` or `use_load_balancer_configuration` . The default is `use_load_balancer_configuration` .\n- `target_group_health.dns_failover.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to the maximum number of targets. The default is `off` .\n- `target_group_health.dns_failover.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are 1 to the maximum number of targets. The default is 1.\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n\nThe following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:\n\n- `load_balancing.algorithm.type` - The load balancing algorithm determines how the load balancer selects targets when routing requests. The value is `round_robin` , `least_outstanding_requests` , or `weighted_random` . The default is `round_robin` .\n- `load_balancing.algorithm.anomaly_mitigation` - Only available when `load_balancing.algorithm.type` is `weighted_random` . Indicates whether anomaly mitigation is enabled. The value is `on` or `off` . The default is `off` .\n- `slow_start.duration_seconds` - The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). The default is 0 seconds (disabled).\n- `stickiness.app_cookie.cookie_name` - Indicates the name of the application-based cookie. Names that start with the following prefixes are not allowed: `AWSALB` , `AWSALBAPP` , and `AWSALBTG` ; they're reserved for use by the load balancer.\n- `stickiness.app_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the application-based cookie is considered stale. The range is 1 second to 1 week (604800 seconds). 
The default value is 1 day (86400 seconds).\n- `stickiness.lb_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n\nThe following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:\n\n- `lambda.multi_value_headers.enabled` - Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is `true` or `false` . The default is `false` . If the value is `false` and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.\n\nThe following attributes are supported only by Network Load Balancers:\n\n- `deregistration_delay.connection_termination.enabled` - Indicates whether the load balancer terminates connections at the end of the deregistration timeout. The value is `true` or `false` . For new UDP/TCP_UDP target groups the default is `true` . Otherwise, the default is `false` .\n- `preserve_client_ip.enabled` - Indicates whether client IP preservation is enabled. The value is `true` or `false` . The default is disabled if the target group type is IP address and the target group protocol is TCP or TLS. Otherwise, the default is enabled. Client IP preservation cannot be disabled for UDP and TCP_UDP target groups.\n- `proxy_protocol_v2.enabled` - Indicates whether Proxy Protocol version 2 is enabled. The value is `true` or `false` . The default is `false` .\n- `target_health_state.unhealthy.connection_termination.enabled` - Indicates whether the load balancer terminates connections to unhealthy targets. The value is `true` or `false` . The default is `true` .\n- `target_health_state.unhealthy.draining_interval_seconds` - The amount of time for Elastic Load Balancing to wait before changing the state of an unhealthy target from `unhealthy.draining` to `unhealthy` . The range is 0-360000 seconds. The default value is 0 seconds.\n\nNote: This attribute can only be configured when `target_health_state.unhealthy.connection_termination.enabled` is `false` .\n\nThe following attributes are supported only by Gateway Load Balancers:\n\n- `target_failover.on_deregistration` - Indicates how the Gateway Load Balancer handles existing flows when a target is deregistered. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) can't be set independently. The value you set for both attributes must be the same.\n- `target_failover.on_unhealthy` - Indicates how the Gateway Load Balancer handles existing flows when a target is unhealthy. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) cannot be set independently. The value you set for both attributes must be the same.", "Value": "The value of the attribute." 
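To make the target group attribute descriptions above concrete, here is a minimal CloudFormation sketch that sets a few of the documented keys through the `TargetGroupAttributes` property of `AWS::ElasticLoadBalancingV2::TargetGroup`. The logical IDs, port, and attribute values are illustrative assumptions, not part of the schema.

```json
{
  "AppTargetGroup": {
    "Type": "AWS::ElasticLoadBalancingV2::TargetGroup",
    "Properties": {
      "Port": 80,
      "Protocol": "HTTP",
      "VpcId": { "Ref": "AppVpc" },
      "TargetGroupAttributes": [
        { "Key": "deregistration_delay.timeout_seconds", "Value": "120" },
        { "Key": "stickiness.enabled", "Value": "true" },
        { "Key": "stickiness.type", "Value": "lb_cookie" },
        { "Key": "stickiness.lb_cookie.duration_seconds", "Value": "86400" }
      ]
    }
  }
}
```

Per the value lists above, `stickiness.type` must match the load balancer type; `lb_cookie` is only valid for Application Load Balancers.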
}, "AWS::ElasticLoadBalancingV2::TrustStore": { @@ -14499,7 +14559,7 @@ "FileSystemPath": "A path on the Amazon FSx for Lustre file system that points to a high-level directory (such as `/ns1/` ) or subdirectory (such as `/ns1/subdir/` ) that will be mapped 1-1 with `DataRepositoryPath` . The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path `/ns1/` , then you cannot link another data repository with file system path `/ns1/ns2` .\n\nThis path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory.\n\n> If you specify only a forward slash ( `/` ) as the file system path, you can link only one data repository to the file system. You can only specify \"/\" as the file system path for the first data repository associated with a file system.", "ImportedFileChunkSize": "For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system or cache.\n\nThe default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.", "S3": "The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system with a data repository association. The configuration defines which file events (new, changed, or deleted files or directories) are automatically imported from the linked data repository to the file system or automatically exported from the file system to the data repository.", - "Tags": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) ." + "Tags": "A list of `Tag` values, with a maximum of 50 elements." }, "AWS::FSx::DataRepositoryAssociation AutoExportPolicy": { "Events": "The `AutoExportPolicy` can have the following event values:\n\n- `NEW` - New files and directories are automatically exported to the data repository as they are added to the file system.\n- `CHANGED` - Changes to files and directories on the file system are automatically exported to the data repository.\n- `DELETED` - Files and directories are automatically deleted on the data repository when they are deleted on the file system.\n\nYou can define any combination of event types for your `AutoExportPolicy` ." @@ -14524,10 +14584,10 @@ "OntapConfiguration": "The ONTAP configuration properties of the FSx for ONTAP file system that you are creating.", "OpenZFSConfiguration": "The Amazon FSx for OpenZFS configuration properties for the file system that you are creating.", "SecurityGroupIds": "A list of IDs specifying the security groups to apply to all network interfaces created for file system access. 
This list isn't returned in later requests to describe the file system.\n\n> You must specify a security group if you are creating a Multi-AZ FSx for ONTAP file system in a VPC subnet that has been shared with you.", - "StorageCapacity": "Sets the storage capacity of the file system that you're creating.\n\n`StorageCapacity` is required if you are creating a new file system.\n\n*FSx for Lustre file systems* - The amount of storage capacity that you can configure depends on the value that you set for `StorageType` and the Lustre `DeploymentType` , as follows:\n\n- For `SCRATCH_2` , `PERSISTENT_2` and `PERSISTENT_1` deployment types using SSD storage type, the valid values are 1200 GiB, 2400 GiB, and increments of 2400 GiB.\n- For `PERSISTENT_1` HDD file systems, valid values are increments of 6000 GiB for 12 MB/s/TiB file systems and increments of 1800 GiB for 40 MB/s/TiB file systems.\n- For `SCRATCH_1` deployment type, valid values are 1200 GiB, 2400 GiB, and increments of 3600 GiB.\n\n*FSx for ONTAP file systems* - The amount of storage capacity that you can configure is from 1024 GiB up to 196,608 GiB (192 TiB).\n\n*FSx for OpenZFS file systems* - The amount of storage capacity that you can configure is from 64 GiB up to 524,288 GiB (512 TiB). If you are creating a file system from a backup, you can specify a storage capacity equal to or greater than the original file system's storage capacity.\n\n*FSx for Windows File Server file systems* - The amount of storage capacity that you can configure depends on the value that you set for `StorageType` as follows:\n\n- For SSD storage, valid values are 32 GiB-65,536 GiB (64 TiB).\n- For HDD storage, valid values are 2000 GiB-65,536 GiB (64 TiB).", + "StorageCapacity": "Sets the storage capacity of the file system that you're creating.\n\n`StorageCapacity` is required if you are creating a new file system. It is not required if you are creating a file system by restoring a backup.\n\n*FSx for Lustre file systems* - The amount of storage capacity that you can configure depends on the value that you set for `StorageType` and the Lustre `DeploymentType` , as follows:\n\n- For `SCRATCH_2` , `PERSISTENT_2` and `PERSISTENT_1` deployment types using SSD storage type, the valid values are 1200 GiB, 2400 GiB, and increments of 2400 GiB.\n- For `PERSISTENT_1` HDD file systems, valid values are increments of 6000 GiB for 12 MB/s/TiB file systems and increments of 1800 GiB for 40 MB/s/TiB file systems.\n- For `SCRATCH_1` deployment type, valid values are 1200 GiB, 2400 GiB, and increments of 3600 GiB.\n\n*FSx for ONTAP file systems* - The amount of storage capacity that you can configure is from 1024 GiB up to 196,608 GiB (192 TiB).\n\n*FSx for OpenZFS file systems* - The amount of storage capacity that you can configure is from 64 GiB up to 524,288 GiB (512 TiB). If you are creating a file system from a backup, you can specify a storage capacity equal to or greater than the original file system's storage capacity.\n\n*FSx for Windows File Server file systems* - The amount of storage capacity that you can configure depends on the value that you set for `StorageType` as follows:\n\n- For SSD storage, valid values are 32 GiB-65,536 GiB (64 TiB).\n- For HDD storage, valid values are 2000 GiB-65,536 GiB (64 TiB).", "StorageType": "Sets the storage type for the file system that you're creating. Valid values are `SSD` and `HDD` .\n\n- Set to `SSD` to use solid state drive storage. 
SSD is supported on all Windows, Lustre, ONTAP, and OpenZFS deployment types.\n- Set to `HDD` to use hard disk drive storage. HDD is supported on `SINGLE_AZ_2` and `MULTI_AZ_1` Windows file system deployment types, and on `PERSISTENT_1` Lustre file system deployment types.\n\nDefault value is `SSD` . For more information, see [Storage type options](https://docs.aws.amazon.com/fsx/latest/WindowsGuide/optimize-fsx-costs.html#storage-type-options) in the *FSx for Windows File Server User Guide* and [Multiple storage options](https://docs.aws.amazon.com/fsx/latest/LustreGuide/what-is.html#storage-options) in the *FSx for Lustre User Guide* .", "SubnetIds": "Specifies the IDs of the subnets that the file system will be accessible from. For Windows and ONTAP `MULTI_AZ_1` deployment types, provide exactly two subnet IDs, one for the preferred file server and one for the standby file server. You specify one of these subnets as the preferred subnet using the `WindowsConfiguration > PreferredSubnetID` or `OntapConfiguration > PreferredSubnetID` properties. For more information about Multi-AZ file system configuration, see [Availability and durability: Single-AZ and Multi-AZ file systems](https://docs.aws.amazon.com/fsx/latest/WindowsGuide/high-availability-multiAZ.html) in the *Amazon FSx for Windows User Guide* and [Availability and durability](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/high-availability-multiAZ.html) in the *Amazon FSx for ONTAP User Guide* .\n\nFor Windows `SINGLE_AZ_1` and `SINGLE_AZ_2` and all Lustre deployment types, provide exactly one subnet ID. The file server is launched in that subnet's Availability Zone.", - "Tags": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .", + "Tags": "The tags to associate with the file system. For more information, see [Tagging your Amazon FSx resources](https://docs.aws.amazon.com/fsx/latest/LustreGuide/tag-resources.html) in the *Amazon FSx for Lustre User Guide* .", "WindowsConfiguration": "The configuration object for the Microsoft Windows file system you are creating.\n\nThis value is required if `FileSystemType` is set to `WINDOWS` ." }, "AWS::FSx::FileSystem AuditLogConfiguration": { @@ -14546,7 +14606,7 @@ "AWS::FSx::FileSystem LustreConfiguration": { "AutoImportPolicy": "(Optional) When you create your file system, your existing S3 objects appear as file and directory listings. Use this property to choose how Amazon FSx keeps your file and directory listings up to date as you add or modify objects in your linked S3 bucket. `AutoImportPolicy` can have the following values:\n\n- `NONE` - (Default) AutoImport is off. Amazon FSx only updates file and directory listings from the linked S3 bucket when the file system is created. FSx does not update file and directory listings for any new or changed objects after choosing this option.\n- `NEW` - AutoImport is on. Amazon FSx automatically imports directory listings of any new objects added to the linked S3 bucket that do not currently exist in the FSx file system.\n- `NEW_CHANGED` - AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket and any existing objects that are changed in the S3 bucket after you choose this option.\n- `NEW_CHANGED_DELETED` - AutoImport is on. 
Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket, any existing objects that are changed in the S3 bucket, and any objects that were deleted in the S3 bucket.\n\nFor more information, see [Automatically import updates from your S3 bucket](https://docs.aws.amazon.com/fsx/latest/LustreGuide/autoimport-data-repo.html) .\n\n> This parameter is not supported for Lustre file systems with a data repository association.", "AutomaticBackupRetentionDays": "The number of days to retain automatic backups. Setting this property to `0` disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is `0` .", - "CopyTagsToBackups": "A Boolean flag indicating whether tags for the file system should be copied to backups. This value defaults to false. If it's set to true, all tags for the file system are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value. Only valid for use with `PERSISTENT_1` deployment types.", + "CopyTagsToBackups": "(Optional) Not available for use with file systems that are linked to a data repository. A boolean flag indicating whether tags for the file system should be copied to backups. The default value is false. If `CopyTagsToBackups` is set to true, all file system tags are copied to all automatic and user-initiated backups when the user doesn't specify any backup-specific tags. If `CopyTagsToBackups` is set to true and you specify one or more backup tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.\n\n(Default = `false` )\n\nFor more information, see [Working with backups](https://docs.aws.amazon.com/fsx/latest/LustreGuide/using-backups-fsx.html) in the *Amazon FSx for Lustre User Guide* .", "DailyAutomaticBackupStartTime": "A recurring daily time, in the format `HH:MM` . `HH` is the zero-padded hour of the day (0-23), and `MM` is the zero-padded minute of the hour. For example, `05:00` specifies 5 AM daily.", "DataCompressionType": "Sets the data compression configuration for the file system. `DataCompressionType` can have the following values:\n\n- `NONE` - (Default) Data compression is turned off when the file system is created.\n- `LZ4` - Data compression is turned on with the LZ4 algorithm.\n\nFor more information, see [Lustre data compression](https://docs.aws.amazon.com/fsx/latest/LustreGuide/data-compression.html) in the *Amazon FSx for Lustre User Guide* .", "DeploymentType": "(Optional) Choose `SCRATCH_1` and `SCRATCH_2` deployment types when you need temporary storage and shorter-term processing of data. The `SCRATCH_2` deployment type provides in-transit encryption of data and higher burst throughput capacity than `SCRATCH_1` .\n\nChoose `PERSISTENT_1` for longer-term storage and for throughput-focused workloads that aren\u2019t latency-sensitive. `PERSISTENT_1` supports encryption of data in transit, and is available in all AWS Regions in which FSx for Lustre is available.\n\nChoose `PERSISTENT_2` for longer-term storage and for latency-sensitive workloads that require the highest levels of IOPS/throughput. 
`PERSISTENT_2` supports SSD storage, and offers higher `PerUnitStorageThroughput` (up to 1000 MB/s/TiB). `PERSISTENT_2` is available in a limited number of AWS Regions . For more information, and an up-to-date list of AWS Regions in which `PERSISTENT_2` is available, see [File system deployment options for FSx for Lustre](https://docs.aws.amazon.com/fsx/latest/LustreGuide/using-fsx-lustre.html#lustre-deployment-types) in the *Amazon FSx for Lustre User Guide* .\n\n> If you choose `PERSISTENT_2` , and you set `FileSystemTypeVersion` to `2.10` , the `CreateFileSystem` operation fails. \n\nEncryption of data in transit is automatically turned on when you access `SCRATCH_2` , `PERSISTENT_1` and `PERSISTENT_2` file systems from Amazon EC2 instances that support automatic encryption in the AWS Regions where they are available. For more information about encryption in transit for FSx for Lustre file systems, see [Encrypting data in transit](https://docs.aws.amazon.com/fsx/latest/LustreGuide/encryption-in-transit-fsxl.html) in the *Amazon FSx for Lustre User Guide* .\n\n(Default = `SCRATCH_1` )", @@ -14567,11 +14627,11 @@ "DiskIopsConfiguration": "The SSD IOPS configuration for the FSx for ONTAP file system.", "EndpointIpAddressRange": "(Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system will be created. By default in the Amazon FSx API, Amazon FSx selects an unused IP address range for you from the 198.19.* range. By default in the Amazon FSx console, Amazon FSx chooses the last 64 IP addresses from the VPC\u2019s primary CIDR range to use as the endpoint IP address range for the file system. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables, as long as they don't overlap with any subnet.", "FsxAdminPassword": "The ONTAP administrative password for the `fsxadmin` user with which you administer your file system using the NetApp ONTAP CLI and REST API.", - "HAPairs": "", + "HAPairs": "Specifies how many high-availability (HA) pairs of file servers will power your file system. Scale-up file systems are powered by 1 HA pair. The default value is 1. FSx for ONTAP scale-out file systems are powered by up to six HA pairs. The value of this property affects the values of `StorageCapacity` , `Iops` , and `ThroughputCapacity` . For more information, see [High-availability (HA) pairs](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/HA-pairs.html) in the FSx for ONTAP user guide.\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `HAPairs` is less than 1 or greater than 6.\n- The value of `HAPairs` is greater than 1 and the value of `DeploymentType` is `SINGLE_AZ_1` or `MULTI_AZ_1` .", "PreferredSubnetId": "Required when `DeploymentType` is set to `MULTI_AZ_1` . This specifies the subnet in which you want the preferred file server to be located.", "RouteTableIds": "(Multi-AZ only) Specifies the route tables in which Amazon FSx creates the rules for routing traffic to the correct file server. You should specify all virtual private cloud (VPC) route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table.\n\n> Amazon FSx manages these route tables for Multi-AZ file systems using tag-based authentication. These route tables are tagged with `Key: AmazonFSx; Value: ManagedByAmazonFSx` . 
When creating FSx for ONTAP Multi-AZ file systems using AWS CloudFormation we recommend that you add the `Key: AmazonFSx; Value: ManagedByAmazonFSx` tag manually.", "ThroughputCapacity": "Sets the throughput capacity for the file system that you're creating in megabytes per second (MBps). For more information, see [Managing throughput capacity](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/managing-throughput-capacity.html) in the FSx for ONTAP User Guide.\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `ThroughputCapacity` and `ThroughputCapacityPerHAPair` are not the same value.\n- The value of `ThroughputCapacity` when divided by the value of `HAPairs` is outside of the valid range for `ThroughputCapacity` .", - "ThroughputCapacityPerHAPair": "", + "ThroughputCapacityPerHAPair": "Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.\n\nYou can define either the `ThroughputCapacityPerHAPair` or the `ThroughputCapacity` when creating a file system, but not both.\n\nThis field and `ThroughputCapacity` are the same for scale-up file systems powered by one HA pair.\n\n- For `SINGLE_AZ_1` and `MULTI_AZ_1` file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.\n- For `SINGLE_AZ_2` file systems, valid values are 3072 or 6144 MBps.\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `ThroughputCapacity` and `ThroughputCapacityPerHAPair` are not the same value for file systems with one HA pair.\n- The value of deployment type is `SINGLE_AZ_2` and `ThroughputCapacity` / `ThroughputCapacityPerHAPair` is a valid HA pair (a value between 2 and 6).\n- The value of `ThroughputCapacityPerHAPair` is not a valid value.", "WeeklyMaintenanceStartTime": "A recurring weekly time, in the format `D:HH:MM` .\n\n`D` is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see [the ISO-8601 spec as described on Wikipedia](https://docs.aws.amazon.com/https://en.wikipedia.org/wiki/ISO_week_date) .\n\n`HH` is the zero-padded hour of the day (0-23), and `MM` is the zero-padded minute of the hour.\n\nFor example, `1:05:00` specifies maintenance at 5 AM Monday." }, "AWS::FSx::FileSystem OpenZFSConfiguration": { @@ -14630,7 +14690,7 @@ }, "AWS::FSx::Snapshot": { "Name": "The name of the snapshot.", - "Tags": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .", + "Tags": "A list of `Tag` values, with a maximum of 50 elements.", "VolumeId": "The ID of the volume that the snapshot is of." }, "AWS::FSx::Snapshot Tag": { @@ -14641,13 +14701,13 @@ "ActiveDirectoryConfiguration": "Describes the Microsoft Active Directory configuration to which the SVM is joined, if applicable.", "FileSystemId": "Specifies the FSx for ONTAP file system on which to create the SVM.", "Name": "The name of the SVM.", - "RootVolumeSecurityStyle": "The security style of the root volume of the SVM. 
Specify one of the following values:\n\n- `UNIX` if the file system is managed by a UNIX administrator, the majority of users are NFS clients, and an application accessing the data uses a UNIX user as the service account.\n- `NTFS` if the file system is managed by a Windows administrator, the majority of users are SMB clients, and an application accessing the data uses a Windows user as the service account.\n- `MIXED` if the file system is managed by both UNIX and Windows administrators and users consist of both NFS and SMB clients.", + "RootVolumeSecurityStyle": "The security style of the root volume of the SVM. Specify one of the following values:\n\n- `UNIX` if the file system is managed by a UNIX administrator, the majority of users are NFS clients, and an application accessing the data uses a UNIX user as the service account.\n- `NTFS` if the file system is managed by a Microsoft Windows administrator, the majority of users are SMB clients, and an application accessing the data uses a Microsoft Windows user as the service account.\n- `MIXED` This is an advanced setting. For more information, see [Volume security style](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/volume-security-style.html) in the Amazon FSx for NetApp ONTAP User Guide.", "SvmAdminPassword": "Specifies the password to use when logging on to the SVM using a secure shell (SSH) connection to the SVM's management endpoint. Doing so enables you to manage the SVM using the NetApp ONTAP CLI or REST API. If you do not specify a password, you can still use the file system's `fsxadmin` user to manage the SVM. For more information, see [Managing SVMs using the NetApp ONTAP CLI](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/managing-resources-ontap-apps.html#vsadmin-ontap-cli) in the *FSx for ONTAP User Guide* .", - "Tags": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) ." + "Tags": "A list of `Tag` values, with a maximum of 50 elements." }, "AWS::FSx::StorageVirtualMachine ActiveDirectoryConfiguration": { "NetBiosName": "The NetBIOS name of the Active Directory computer object that will be created for your SVM.", - "SelfManagedActiveDirectoryConfiguration": "The configuration that Amazon FSx uses to join the ONTAP storage virtual machine (SVM) to your self-managed (including on-premises) Microsoft Active Directory (AD) directory." + "SelfManagedActiveDirectoryConfiguration": "The configuration that Amazon FSx uses to join the ONTAP storage virtual machine (SVM) to your self-managed (including on-premises) Microsoft Active Directory directory." }, "AWS::FSx::StorageVirtualMachine SelfManagedActiveDirectoryConfiguration": { "DnsIps": "A list of up to three IP addresses of DNS servers or domain controllers in the self-managed AD directory.", @@ -15731,6 +15791,17 @@ "ViewExpandedText": "Included for Apache Hive compatibility. Not used in the normal course of AWS Glue operations.", "ViewOriginalText": "Included for Apache Hive compatibility. Not used in the normal course of AWS Glue operations. If the table is a `VIRTUAL_VIEW` , certain Athena configuration encoded in base64." }, + "AWS::Glue::TableOptimizer": { + "CatalogId": "The catalog ID of the table.", + "DatabaseName": "The name of the database. For Hive compatibility, this is folded to lowercase when it is stored.", + "TableName": "The table name. 
For Hive compatibility, this must be entirely lowercase.", + "TableOptimizerConfiguration": "Specifies configuration details of a table optimizer.", + "Type": "The type of table optimizer. Currently, the only valid value is `compaction` ." + }, + "AWS::Glue::TableOptimizer TableOptimizerConfiguration": { + "Enabled": "Whether table optimization is enabled.", + "RoleArn": "A role passed by the caller which gives the service permission to update the resources associated with the optimizer on the caller's behalf." + }, "AWS::Glue::Trigger": { "Actions": "The actions initiated by this trigger.", "Description": "A description of this trigger.", @@ -16960,7 +17031,7 @@ "ImageScanningConfiguration": "Contains settings for vulnerability scans.", "ImageTestsConfiguration": "The configuration settings for your image test components, which includes a toggle that allows you to turn off tests, and a timeout setting.", "InfrastructureConfigurationArn": "The Amazon Resource Name (ARN) of the infrastructure configuration associated with this image pipeline.", - "Tags": "The tags of the image.", + "Tags": "The tags that apply to this image.", "Workflows": "Contains the build and test workflows that are associated with the image." }, "AWS::ImageBuilder::Image EcrConfiguration": { @@ -17097,67 +17168,67 @@ }, "AWS::ImageBuilder::LifecyclePolicy": { "Description": "Optional description for the lifecycle policy.", - "ExecutionRole": "The name or Amazon Resource Name (ARN) of the IAM role that Image Builder uses to run the lifecycle policy. This is a custom role that you create.", - "Name": "The name of the lifecycle policy.", - "PolicyDetails": "The configuration details for a lifecycle policy resource.", - "ResourceSelection": "Resource selection criteria used to run the lifecycle policy.", - "ResourceType": "The type of resources the lifecycle policy targets.", + "ExecutionRole": "The name or Amazon Resource Name (ARN) for the IAM role you create that grants Image Builder access to run lifecycle actions.", + "Name": "The name of the lifecycle policy to create.", + "PolicyDetails": "Configuration details for the lifecycle policy rules.", + "ResourceSelection": "Selection criteria for the resources that the lifecycle policy applies to.", + "ResourceType": "The type of Image Builder resource that the lifecycle policy applies to.", "Status": "Indicates whether the lifecycle policy resource is enabled.", - "Tags": "To help manage your lifecycle policy resources, you can assign your own metadata to each resource in the form of tags. Each tag consists of a key and an optional value, both of which you define." + "Tags": "Tags to apply to the lifecycle policy resource." }, "AWS::ImageBuilder::LifecyclePolicy Action": { - "IncludeResources": "", - "Type": "" + "IncludeResources": "Specifies the resources that the lifecycle policy applies to.", + "Type": "Specifies the lifecycle action to take." }, "AWS::ImageBuilder::LifecyclePolicy AmiExclusionRules": { - "IsPublic": "", - "LastLaunched": "", - "Regions": "", - "SharedAccounts": "", - "TagMap": "" + "IsPublic": "Configures whether public AMIs are excluded from the lifecycle action.", + "LastLaunched": "Specifies configuration details for Image Builder to exclude the most recent resources from lifecycle actions.", + "Regions": "Configures AWS Regions that are excluded from the lifecycle action.", + "SharedAccounts": "Specifies AWS accounts whose resources are excluded from the lifecycle action.", + "TagMap": "Lists tags that should be excluded from lifecycle actions for the AMIs that have them." 
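As a rough usage sketch for the new `AWS::Glue::TableOptimizer` resource documented above: the property shape follows the descriptions in that block, while the `compaction` type string, the database and table names, and the `OptimizerRole` resource are assumptions for illustration.

```json
{
  "OrdersCompactionOptimizer": {
    "Type": "AWS::Glue::TableOptimizer",
    "Properties": {
      "CatalogId": { "Ref": "AWS::AccountId" },
      "DatabaseName": "sales_db",
      "TableName": "orders",
      "Type": "compaction",
      "TableOptimizerConfiguration": {
        "Enabled": true,
        "RoleArn": { "Fn::GetAtt": [ "OptimizerRole", "Arn" ] }
      }
    }
  }
}
```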
}, "AWS::ImageBuilder::LifecyclePolicy ExclusionRules": { - "Amis": "", - "TagMap": "" + "Amis": "Lists configuration values that apply to AMIs that Image Builder should exclude from the lifecycle action.", + "TagMap": "Contains a list of tags that Image Builder uses to skip lifecycle actions for Image Builder image resources that have them." }, "AWS::ImageBuilder::LifecyclePolicy Filter": { - "RetainAtLeast": "", - "Type": "", - "Unit": "", - "Value": "" + "RetainAtLeast": "For age-based filters, this is the number of resources to keep on hand after the lifecycle `DELETE` action is applied. Impacted resources are only deleted if you have more than this number of resources. If you have fewer resources than this number, the impacted resource is not deleted.", + "Type": "Filter resources based on either `age` or `count` .", + "Unit": "Defines the unit of time that the lifecycle policy uses to determine impacted resources. This is required for age-based rules.", + "Value": "The number of units for the time period or for the count. For example, a value of `6` might refer to six months or six AMIs.\n\n> For count-based filters, this value represents the minimum number of resources to keep on hand. If you have fewer resources than this number, the resource is excluded from lifecycle actions." }, "AWS::ImageBuilder::LifecyclePolicy IncludeResources": { - "Amis": "", - "Containers": "", - "Snapshots": "" + "Amis": "Specifies whether the lifecycle action should apply to distributed AMIs.", + "Containers": "Specifies whether the lifecycle action should apply to distributed containers.", + "Snapshots": "Specifies whether the lifecycle action should apply to snapshots associated with distributed AMIs." }, "AWS::ImageBuilder::LifecyclePolicy LastLaunched": { - "Unit": "", - "Value": "" + "Unit": "Defines the unit of time that the lifecycle policy uses to calculate elapsed time since the last instance launched from the AMI. For example: days, weeks, months, or years.", + "Value": "The integer number of units for the time period. For example `6` (months)." }, "AWS::ImageBuilder::LifecyclePolicy PolicyDetail": { - "Action": "", - "ExclusionRules": "", - "Filter": "" + "Action": "Configuration details for the policy action.", + "ExclusionRules": "Additional rules to specify resources that should be exempt from policy actions.", + "Filter": "Specifies the resources that the lifecycle policy applies to." }, "AWS::ImageBuilder::LifecyclePolicy RecipeSelection": { - "Name": "", - "SemanticVersion": "" + "Name": "The name of an Image Builder recipe that the lifecycle policy uses for resource selection.", + "SemanticVersion": "The version of the Image Builder recipe specified by the `name` field." }, "AWS::ImageBuilder::LifecyclePolicy ResourceSelection": { - "Recipes": "", - "TagMap": "" + "Recipes": "A list of recipes that are used as selection criteria for the output images that the lifecycle policy applies to.", + "TagMap": "A list of tags that are used as selection criteria for the Image Builder image resources that the lifecycle policy applies to." 
}, "AWS::ImageBuilder::Workflow": { "ChangeDescription": "Describes what change has been made in this version of the workflow, or what makes this version different from other versions of the workflow.", - "Data": "Contains the YAML document content for the workflow.", - "Description": "The description of the workflow.", - "KmsKeyId": "The KMS key identifier used to encrypt the workflow resource.", - "Name": "The name of the workflow resource.", - "Tags": "The tags that apply to the workflow resource", - "Type": "Specifies the image creation stage that the workflow applies to. Image Builder currently supports build and test workflows.", - "Uri": "", - "Version": "The workflow resource version. Workflow resources are immutable. To make a change, you can clone a workflow or create a new version." + "Data": "Contains the UTF-8 encoded YAML document content for the workflow. Alternatively, you can specify the `uri` of a YAML document file stored in Amazon S3. However, you cannot specify both properties.", + "Description": "Describes the workflow.", + "KmsKeyId": "The ID of the KMS key that is used to encrypt this workflow resource.", + "Name": "The name of the workflow to create.", + "Tags": "Tags that apply to the workflow resource.", + "Type": "The phase in the image build process for which the workflow resource is responsible.", + "Uri": "The `uri` of a YAML component document file. This must be an S3 URL ( `s3://bucket/key` ), and the requester must have permission to access the S3 bucket it points to. If you use Amazon S3, you can specify component content up to your service quota.\n\nAlternatively, you can specify the YAML document inline, using the component `data` property. You cannot specify both properties.", + "Version": "The semantic version of this workflow resource. The semantic version syntax adheres to the following rules.\n\n> The semantic version has four nodes: ../. You can assign values for the first three, and can filter on all of them.\n> \n> *Assignment:* For the first three nodes you can assign any positive integer value, including zero, with an upper limit of 2^30-1, or 1073741823 for each node. Image Builder automatically assigns the build number to the fourth node.\n> \n> *Patterns:* You can use any numeric pattern that adheres to the assignment requirements for the nodes that you can assign. For example, you might choose a software version pattern, such as 1.0.0, or a date, such as 2021.01.01." }, "AWS::Inspector::AssessmentTarget": { "AssessmentTargetName": "The name of the Amazon Inspector assessment target. The name must be unique within the AWS account .", @@ -17181,6 +17252,38 @@ "Key": "A tag key.", "Value": "A value assigned to a tag key." }, + "AWS::InspectorV2::CisScanConfiguration": { + "ScanName": "The name of the CIS scan configuration.", + "Schedule": "The CIS scan configuration's schedule.", + "SecurityLevel": "The CIS scan configuration's CIS Benchmark level.", + "Tags": "The CIS scan configuration's tags.", + "Targets": "The CIS scan configuration's targets." + }, + "AWS::InspectorV2::CisScanConfiguration CisTargets": { + "AccountIds": "The CIS target account ids.", + "TargetResourceTags": "The CIS target resource tags." + }, + "AWS::InspectorV2::CisScanConfiguration DailySchedule": { + "StartTime": "The schedule start time." + }, + "AWS::InspectorV2::CisScanConfiguration MonthlySchedule": { + "Day": "The monthly schedule's day.", + "StartTime": "The monthly schedule's start time." 
+ }, + "AWS::InspectorV2::CisScanConfiguration Schedule": { + "Daily": "A daily schedule.", + "Monthly": "A monthly schedule.", + "OneTime": "A one time schedule.", + "Weekly": "A weekly schedule." + }, + "AWS::InspectorV2::CisScanConfiguration Time": { + "TimeOfDay": "The time of day in 24-hour format (00:00).", + "TimeZone": "The timezone." + }, + "AWS::InspectorV2::CisScanConfiguration WeeklySchedule": { + "Days": "The weekly schedule's days.", + "StartTime": "The weekly schedule's start time." + }, "AWS::InspectorV2::Filter": { "Description": "A description of the filter.", "FilterAction": "The action that is to be applied to the findings that match the filter.", @@ -17426,6 +17529,7 @@ "DomainConfigurationStatus": "The status to which the domain configuration should be updated.\n\nValid values: `ENABLED` | `DISABLED`", "DomainName": "The name of the domain.", "ServerCertificateArns": "The ARNs of the certificates that AWS IoT passes to the device during the TLS handshake. Currently you can specify only one certificate ARN. This value is not required for AWS -managed domains.", + "ServerCertificateConfig": "The server certificate configuration.\n\nFor more information, see [Configurable endpoints](https://docs.aws.amazon.com//iot/latest/developerguide/iot-custom-endpoints-configurable.html) from the AWS IoT Core Developer Guide.", "ServiceType": "The type of service delivered by the endpoint.\n\n> AWS IoT Core currently supports only the `DATA` service type.", "Tags": "Metadata which can be used to manage the domain configuration.\n\n> For URI Request parameters use format: ...key1=value1&key2=value2...\n> \n> For the CLI command-line parameter use format: &&tags \"key1=value1&key2=value2...\"\n> \n> For the cli-input-json file use format: \"tags\": \"key1=value1&key2=value2...\"", "TlsConfig": "An object that specifies the TLS configuration for a domain.", @@ -17435,6 +17539,9 @@ "AllowAuthorizerOverride": "A Boolean that specifies whether the domain configuration's authorization service can be overridden.", "DefaultAuthorizerName": "The name of the authorization service for a domain configuration." }, + "AWS::IoT::DomainConfiguration ServerCertificateConfig": { + "EnableOCSPCheck": "A Boolean value that indicates whether Online Certificate Status Protocol (OCSP) server certificate check is enabled or not. For more information, see [Configurable endpoints](https://docs.aws.amazon.com//iot/latest/developerguide/iot-custom-endpoints-configurable.html) from the AWS IoT Core Developer Guide." + }, "AWS::IoT::DomainConfiguration ServerCertificateSummary": { "ServerCertificateArn": "The ARN of the server certificate.", "ServerCertificateStatus": "The status of the server certificate.", @@ -19161,8 +19268,8 @@ "Tags": "The tags are an array of key-value pairs to attach to the specified resource. Tags can have a minimum of 0 and a maximum of 50 items." }, "AWS::IoTWireless::Destination Tag": { - "Key": "", - "Value": "" + "Key": "The tag's key value.", + "Value": "The tag's value." }, "AWS::IoTWireless::DeviceProfile": { "LoRaWAN": "LoRaWAN device profile object.", @@ -19191,8 +19298,8 @@ "SupportsJoin": "The SupportsJoin value." }, "AWS::IoTWireless::DeviceProfile Tag": { - "Key": "", - "Value": "" + "Key": "The tag's key value.", + "Value": "The tag's value." }, "AWS::IoTWireless::FuotaTask": { "AssociateMulticastGroup": "The ID of the multicast group to associate with a FUOTA task.", @@ -19211,8 +19318,8 @@ "StartTime": "Start time of a FUOTA task." 
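Assembling the CIS scan configuration types documented above, a sketch of a weekly Level 1 scan; the `LEVEL_1` and `MON` enum strings, the account ID, and the tag values are illustrative assumptions.

```json
{
  "WeeklyCisScan": {
    "Type": "AWS::InspectorV2::CisScanConfiguration",
    "Properties": {
      "ScanName": "weekly-cis-scan",
      "SecurityLevel": "LEVEL_1",
      "Schedule": {
        "Weekly": {
          "Days": [ "MON" ],
          "StartTime": { "TimeOfDay": "02:00", "TimeZone": "UTC" }
        }
      },
      "Targets": {
        "AccountIds": [ "123456789012" ],
        "TargetResourceTags": { "cis-scan": [ "enabled" ] }
      }
    }
  }
}
```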
}, "AWS::IoTWireless::FuotaTask Tag": { - "Key": "", - "Value": "" + "Key": "The tag's key value.", + "Value": "The tag's value." }, "AWS::IoTWireless::MulticastGroup": { "AssociateWirelessDevice": "The ID of the wireless device to associate with a multicast group.", @@ -19229,8 +19336,8 @@ "RfRegion": "The frequency band (RFRegion) value." }, "AWS::IoTWireless::MulticastGroup Tag": { - "Key": "", - "Value": "" + "Key": "The tag's key value.", + "Value": "The tag's value." }, "AWS::IoTWireless::NetworkAnalyzerConfiguration": { "Description": "The description of the resource.", @@ -19241,8 +19348,8 @@ "WirelessGateways": "Wireless gateway resources to add to the network analyzer configuration. Provide the `WirelessGatewayId` of the resource to add in the input array." }, "AWS::IoTWireless::NetworkAnalyzerConfiguration Tag": { - "Key": "", - "Value": "" + "Key": "The tag's key value.", + "Value": "The tag's value." }, "AWS::IoTWireless::NetworkAnalyzerConfiguration TraceContent": { "LogLevel": "The log level for a log message. The log levels can be disabled, or set to `ERROR` to display less verbose logs containing only error information, or to `INFO` for more detailed logs", @@ -19253,7 +19360,7 @@ "PartnerAccountId": "The ID of the partner account to update.", "PartnerType": "The partner type.", "Sidewalk": "The Sidewalk account credentials.", - "SidewalkResponse": "", + "SidewalkResponse": "Information about a Sidewalk account.", "SidewalkUpdate": "Sidewalk update.", "Tags": "The tags are an array of key-value pairs to attach to the specified resource. Tags can have a minimum of 0 and a maximum of 50 items." }, @@ -19269,8 +19376,8 @@ "AppServerPrivateKey": "The new Sidewalk application server private key." }, "AWS::IoTWireless::PartnerAccount Tag": { - "Key": "", - "Value": "" + "Key": "The tag's key value.", + "Value": "The tag's value." }, "AWS::IoTWireless::ServiceProfile": { "LoRaWAN": "LoRaWAN service profile object.", @@ -19299,8 +19406,8 @@ "UlRatePolicy": "The ULRatePolicy value.\n\nThis property is `ReadOnly` and can't be inputted for create. It's returned with `Fn::GetAtt`" }, "AWS::IoTWireless::ServiceProfile Tag": { - "Key": "", - "Value": "" + "Key": "The tag's key value.", + "Value": "The tag's value." }, "AWS::IoTWireless::TaskDefinition": { "AutoCreateTasks": "Whether to automatically create tasks using this task definition for all gateways with the specified current version. If `false` , the task must be created by calling `CreateWirelessGatewayTask` .", @@ -19326,8 +19433,8 @@ "UpdateVersion": "The firmware version to update the gateway to." }, "AWS::IoTWireless::TaskDefinition Tag": { - "Key": "", - "Value": "" + "Key": "The tag's key value.", + "Value": "The tag's value." }, "AWS::IoTWireless::TaskDefinition UpdateWirelessGatewayTaskCreate": { "LoRaWAN": "The properties that relate to the LoRaWAN wireless gateway.", @@ -19345,15 +19452,15 @@ "Type": "The wireless device type." }, "AWS::IoTWireless::WirelessDevice AbpV10x": { - "DevAddr": "", - "SessionKeys": "" + "DevAddr": "The DevAddr value.", + "SessionKeys": "Session keys for ABP v1.0.x." }, "AWS::IoTWireless::WirelessDevice AbpV11": { "DevAddr": "The DevAddr value.", "SessionKeys": "Session keys for ABP v1.1." 
}, "AWS::IoTWireless::WirelessDevice LoRaWANDevice": { - "AbpV10x": "", + "AbpV10x": "ABP device object for LoRaWAN specification v1.0.x.", "AbpV11": "ABP device object for create APIs for v1.1.", "DevEui": "The DevEUI value.", "DeviceProfileId": "The ID of the device profile for the new wireless device.", @@ -19362,8 +19469,8 @@ "ServiceProfileId": "The ID of the service profile." }, "AWS::IoTWireless::WirelessDevice OtaaV10x": { - "AppEui": "", - "AppKey": "" + "AppEui": "The AppEUI value. You specify this value when using LoRaWAN versions v1.0.2 or v1.0.3.", + "AppKey": "The AppKey value." }, "AWS::IoTWireless::WirelessDevice OtaaV11": { "AppKey": "The AppKey is a secret key, which you should handle in a similar way as you would an application password. You can protect the AppKey value by storing it in the AWS Secrets Manager and use the [secretsmanager](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/dynamic-references.html#dynamic-references-secretsmanager) to reference this value.", @@ -19371,8 +19478,8 @@ "NwkKey": "The NwkKey is a secret key, which you should handle in a similar way as you would an application password. You can protect the NwkKey value by storing it in the AWS Secrets Manager and use the [secretsmanager](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/dynamic-references.html#dynamic-references-secretsmanager) to reference this value." }, "AWS::IoTWireless::WirelessDevice SessionKeysAbpV10x": { - "AppSKey": "", - "NwkSKey": "" + "AppSKey": "The AppSKey value.", + "NwkSKey": "The NwkKey value." }, "AWS::IoTWireless::WirelessDevice SessionKeysAbpV11": { "AppSKey": "The AppSKey is a secret key, which you should handle in a similar way as you would an application password. You can protect the AppSKey value by storing it in the AWS Secrets Manager and use the [secretsmanager](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/dynamic-references.html#dynamic-references-secretsmanager) to reference this value.", @@ -19381,8 +19488,8 @@ "SNwkSIntKey": "The SNwkSIntKey is a secret key, which you should handle in a similar way as you would an application password. You can protect the SNwkSIntKey value by storing it in the AWS Secrets Manager and use the [secretsmanager](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/dynamic-references.html#dynamic-references-secretsmanager) to reference this value." }, "AWS::IoTWireless::WirelessDevice Tag": { - "Key": "", - "Value": "" + "Key": "The tag's key value.", + "Value": "The tag's value." }, "AWS::IoTWireless::WirelessDeviceImportTask": { "DestinationName": "The name of the destination that describes the IoT rule to route messages from the Sidewalk devices in the import task to other applications.", @@ -19396,8 +19503,8 @@ "SidewalkManufacturingSn": "The Sidewalk manufacturing serial number (SMSN) of the Sidewalk device." }, "AWS::IoTWireless::WirelessDeviceImportTask Tag": { - "Key": "", - "Value": "" + "Key": "The tag's key value.", + "Value": "The tag's value." }, "AWS::IoTWireless::WirelessGateway": { "Description": "The description of the new resource. The maximum length is 2048 characters.", @@ -19413,8 +19520,8 @@ "RfRegion": "The frequency band (RFRegion) value." }, "AWS::IoTWireless::WirelessGateway Tag": { - "Key": "", - "Value": "" + "Key": "The tag's key value.", + "Value": "The tag's value." }, "AWS::KMS::Alias": { "AliasName": "Specifies the alias name. 
This value must begin with `alias/` followed by a name, such as `alias/ExampleAlias` .\n\n> If you change the value of the `AliasName` property, the existing alias is deleted and a new alias is created for the specified KMS key. This change can disrupt applications that use the alias. It can also allow or deny access to a KMS key affected by attribute-based access control (ABAC). \n\nThe alias must be a string of 1-256 characters. It can contain only alphanumeric characters, forward slashes (/), underscores (_), and dashes (-). The alias name cannot begin with `alias/aws/` . The `alias/aws/` prefix is reserved for [AWS managed keys](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk) .", @@ -20334,7 +20441,7 @@ "MSKSourceConfiguration": "The configuration for the Amazon MSK cluster to be used as the source for a delivery stream.", "RedshiftDestinationConfiguration": "An Amazon Redshift destination for the delivery stream.\n\nConditional. You must specify only one destination configuration.\n\nIf you change the delivery stream destination from an Amazon Redshift destination to an Amazon ES destination, update requires [some interruptions](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-some-interrupt) .", "S3DestinationConfiguration": "The `S3DestinationConfiguration` property type specifies an Amazon Simple Storage Service (Amazon S3) destination to which Amazon Kinesis Data Firehose (Kinesis Data Firehose) delivers data.\n\nConditional. You must specify only one destination configuration.\n\nIf you change the delivery stream destination from an Amazon S3 destination to an Amazon ES destination, update requires [some interruptions](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-some-interrupt) .", - "SnowflakeDestinationConfiguration": "", + "SnowflakeDestinationConfiguration": "Configures a Snowflake destination for the delivery stream.", "SplunkDestinationConfiguration": "The configuration of a destination in Splunk for the delivery stream.", "Tags": "A set of tags to assign to the delivery stream. A tag is a key-value pair that you can define and assign to AWS resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see [Using Cost Allocation Tags](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) in the AWS Billing and Cost Management User Guide.\n\nYou can specify up to 50 tags when creating a delivery stream." }, @@ -20348,14 +20455,14 @@ "CollectionEndpoint": "The endpoint to use when communicating with the collection in the Serverless offering for Amazon OpenSearch Service.", "IndexName": "The Serverless offering for Amazon OpenSearch Service index name.", "ProcessingConfiguration": "", - "RetryOptions": "The retry behavior in case Kinesis Data Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service. The default value is 300 (5 minutes).", - "RoleARN": "The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Serverless offering for Amazon OpenSearch Service Configuration API and for indexing documents.", - "S3BackupMode": "Defines how documents should be delivered to Amazon S3. 
When it is set to FailedDocumentsOnly, Kinesis Data Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix.", + "RetryOptions": "The retry behavior in case Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service. The default value is 300 (5 minutes).", + "RoleARN": "The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Serverless offering for Amazon OpenSearch Service Configuration API and for indexing documents.", + "S3BackupMode": "Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix.", "S3Configuration": "", "VpcConfiguration": "" }, "AWS::KinesisFirehose::DeliveryStream AmazonOpenSearchServerlessRetryOptions": { - "DurationInSeconds": "After an initial failure to deliver to the Serverless offering for Amazon OpenSearch Service, the total amount of time during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries." + "DurationInSeconds": "After an initial failure to deliver to the Serverless offering for Amazon OpenSearch Service, the total amount of time during which Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries." }, "AWS::KinesisFirehose::DeliveryStream AmazonopensearchserviceBufferingHints": { "IntervalInSeconds": "Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300 (5 minutes).", @@ -20365,7 +20472,7 @@ "BufferingHints": "The buffering options. If no value is specified, the default values for AmazonopensearchserviceBufferingHints are used.", "CloudWatchLoggingOptions": "Describes the Amazon CloudWatch logging options for your delivery stream.", "ClusterEndpoint": "The endpoint to use when communicating with the cluster. Specify either this ClusterEndpoint or the DomainARN field.", - "DocumentIdOptions": "Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.", + "DocumentIdOptions": "Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.", "DomainARN": "The ARN of the Amazon OpenSearch Service domain.", "IndexName": "The Amazon OpenSearch Service index name.", "IndexRotationPeriod": "The Amazon OpenSearch Service index rotation period. 
Index rotation appends a timestamp to the IndexName to facilitate the expiration of old data.", @@ -20400,20 +20507,20 @@ }, "AWS::KinesisFirehose::DeliveryStream DataFormatConversionConfiguration": { "Enabled": "Defaults to `true` . Set it to `false` if you want to disable format conversion while preserving the configuration details.", - "InputFormatConfiguration": "Specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. This parameter is required if `Enabled` is set to true.", - "OutputFormatConfiguration": "Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. This parameter is required if `Enabled` is set to true.", + "InputFormatConfiguration": "Specifies the deserializer that you want Firehose to use to convert the format of your data from JSON. This parameter is required if `Enabled` is set to true.", + "OutputFormatConfiguration": "Specifies the serializer that you want Firehose to use to convert the format of your data to the Parquet or ORC format. This parameter is required if `Enabled` is set to true.", "SchemaConfiguration": "Specifies the AWS Glue Data Catalog table that contains the column information. This parameter is required if `Enabled` is set to true." }, "AWS::KinesisFirehose::DeliveryStream DeliveryStreamEncryptionConfigurationInput": { - "KeyARN": "If you set `KeyType` to `CUSTOMER_MANAGED_CMK` , you must specify the Amazon Resource Name (ARN) of the CMK. If you set `KeyType` to `AWS _OWNED_CMK` , Kinesis Data Firehose uses a service-account CMK.", + "KeyARN": "If you set `KeyType` to `CUSTOMER_MANAGED_CMK` , you must specify the Amazon Resource Name (ARN) of the CMK. If you set `KeyType` to `AWS_OWNED_CMK` , Firehose uses a service-account CMK.", "KeyType": "Indicates the type of customer master key (CMK) to use for encryption. The default setting is `AWS_OWNED_CMK` . For more information about CMKs, see [Customer Master Keys (CMKs)](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#master_keys) .\n\nYou can use a CMK of type CUSTOMER_MANAGED_CMK to encrypt up to 500 delivery streams.\n\n> To encrypt your delivery stream, use symmetric CMKs. Kinesis Data Firehose doesn't support asymmetric CMKs. For information about symmetric and asymmetric CMKs, see [About Symmetric and Asymmetric CMKs](https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html) in the AWS Key Management Service developer guide." }, "AWS::KinesisFirehose::DeliveryStream Deserializer": { - "HiveJsonSerDe": "The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.", - "OpenXJsonSerDe": "The OpenX SerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe." + "HiveJsonSerDe": "The native Hive / HCatalog JsonSerDe. Used by Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. 
This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.", + "OpenXJsonSerDe": "The OpenX SerDe. Used by Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe." }, "AWS::KinesisFirehose::DeliveryStream DocumentIdOptions": { - "DefaultDocumentIdFormat": "When the `FIREHOSE_DEFAULT` option is chosen, Kinesis Data Firehose generates a unique document ID for each record based on a unique internal identifier. The generated document ID is stable across multiple delivery attempts, which helps prevent the same record from being indexed multiple times with different document IDs.\n\nWhen the `NO_DOCUMENT_ID` option is chosen, Kinesis Data Firehose does not include any document IDs in the requests it sends to the Amazon OpenSearch Service. This causes the Amazon OpenSearch Service domain to generate document IDs. In case of multiple delivery attempts, this may cause the same record to be indexed more than once with different document IDs. This option enables write-heavy operations, such as the ingestion of logs and observability data, to consume less resources in the Amazon OpenSearch Service domain, resulting in improved performance." + "DefaultDocumentIdFormat": "When the `FIREHOSE_DEFAULT` option is chosen, Firehose generates a unique document ID for each record based on a unique internal identifier. The generated document ID is stable across multiple delivery attempts, which helps prevent the same record from being indexed multiple times with different document IDs.\n\nWhen the `NO_DOCUMENT_ID` option is chosen, Firehose does not include any document IDs in the requests it sends to the Amazon OpenSearch Service. This causes the Amazon OpenSearch Service domain to generate document IDs. In case of multiple delivery attempts, this may cause the same record to be indexed more than once with different document IDs. This option enables write-heavy operations, such as the ingestion of logs and observability data, to consume less resources in the Amazon OpenSearch Service domain, resulting in improved performance." }, "AWS::KinesisFirehose::DeliveryStream DynamicPartitioningConfiguration": { "Enabled": "Specifies whether dynamic partitioning is enabled for this Kinesis Data Firehose delivery stream.", @@ -20427,7 +20534,7 @@ "BufferingHints": "Configures how Kinesis Data Firehose buffers incoming data while delivering it to the Amazon ES domain.", "CloudWatchLoggingOptions": "The Amazon CloudWatch Logs logging options for the delivery stream.", "ClusterEndpoint": "The endpoint to use when communicating with the cluster. Specify either this `ClusterEndpoint` or the `DomainARN` field.", - "DocumentIdOptions": "Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.", + "DocumentIdOptions": "Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.", "DomainARN": "The ARN of the Amazon ES domain. 
The IAM role must have permissions for `DescribeElasticsearchDomain` , `DescribeElasticsearchDomains` , and `DescribeElasticsearchDomainConfig` after assuming the role specified in *RoleARN* .\n\nSpecify either `ClusterEndpoint` or `DomainARN` .", "IndexName": "The name of the Elasticsearch index to which Kinesis Data Firehose adds data for indexing.", "IndexRotationPeriod": "The frequency of Elasticsearch index rotation. If you enable index rotation, Kinesis Data Firehose appends a portion of the UTC arrival timestamp to the specified index name, and rotates the appended timestamp accordingly. For more information, see [Index Rotation for the Amazon ES Destination](https://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#es-index-rotation) in the *Amazon Kinesis Data Firehose Developer Guide* .", @@ -20462,7 +20569,7 @@ "S3BackupMode": "The Amazon S3 backup mode. After you create a delivery stream, you can update it to enable Amazon S3 backup if it is disabled. If backup is enabled, you can't update the delivery stream to disable it." }, "AWS::KinesisFirehose::DeliveryStream HiveJsonSerDe": { - "TimestampFormats": "Indicates how you want Kinesis Data Firehose to parse the date and timestamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see [Class DateTimeFormat](https://docs.aws.amazon.com/https://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html) . You can also use the special value `millis` to parse timestamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses `java.sql.Timestamp::valueOf` by default." + "TimestampFormats": "Indicates how you want Firehose to parse the date and timestamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see [Class DateTimeFormat](https://docs.aws.amazon.com/https://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html) . You can also use the special value `millis` to parse timestamps in epoch milliseconds. If you don't specify a format, Firehose uses `java.sql.Timestamp::valueOf` by default." }, "AWS::KinesisFirehose::DeliveryStream HttpEndpointCommonAttribute": { "AttributeName": "The name of the HTTP endpoint common attribute.", @@ -20504,13 +20611,13 @@ "TopicName": "The topic name within the Amazon MSK cluster." }, "AWS::KinesisFirehose::DeliveryStream OpenXJsonSerDe": { - "CaseInsensitive": "When set to `true` , which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.", + "CaseInsensitive": "When set to `true` , which is the default, Firehose converts JSON keys to lowercase before deserializing them.", "ColumnToJsonKeyMappings": "Maps column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, `timestamp` is a Hive keyword. If you have a JSON key named `timestamp` , set this parameter to `{\"ts\": \"timestamp\"}` to map this key to a column named `ts` .", - "ConvertDotsInJsonKeysToUnderscores": "When set to `true` , specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. 
For example, if the JSON contains a key whose name is \"a.b\", you can define the column name to be \"a_b\" when using this option.\n\nThe default is `false` ." + "ConvertDotsInJsonKeysToUnderscores": "When set to `true` , specifies that the names of the keys include dots and that you want Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is \"a.b\", you can define the column name to be \"a_b\" when using this option.\n\nThe default is `false` ." }, "AWS::KinesisFirehose::DeliveryStream OrcSerDe": { - "BlockSizeBytes": "The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.", - "BloomFilterColumns": "The column names for which you want Kinesis Data Firehose to create bloom filters. The default is `null` .", + "BlockSizeBytes": "The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Firehose uses this value for padding calculations.", + "BloomFilterColumns": "The column names for which you want Firehose to create bloom filters. The default is `null` .", "BloomFilterFalsePositiveProbability": "The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.", "Compression": "The compression code to use over data blocks. The default is `SNAPPY` .", "DictionaryKeyThreshold": "Represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to 1.", @@ -20524,7 +20631,7 @@ "Serializer": "Specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. If both are non-null, the server rejects the request." }, "AWS::KinesisFirehose::DeliveryStream ParquetSerDe": { - "BlockSizeBytes": "The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.", + "BlockSizeBytes": "The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Firehose uses this value for padding calculations.", "Compression": "The compression code to use over data blocks. The possible values are `UNCOMPRESSED` , `SNAPPY` , and `GZIP` , with the default being `SNAPPY` . Use `SNAPPY` for higher decompression speed. Use `GZIP` if the compression ratio is more important than speed.", "EnableDictionaryCompression": "Indicates whether to enable dictionary compression.", "MaxPaddingBytes": "The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. 
The default is 0.", @@ -20549,7 +20656,7 @@ "CopyCommand": "Configures the Amazon Redshift `COPY` command that Kinesis Data Firehose uses to load data into the cluster from the Amazon S3 bucket.", "Password": "The password for the Amazon Redshift user that you specified in the `Username` property.", "ProcessingConfiguration": "The data processing configuration for the Kinesis Data Firehose delivery stream.", - "RetryOptions": "The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).", + "RetryOptions": "The retry behavior in case Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).", "RoleARN": "The ARN of the AWS Identity and Access Management (IAM) role that grants Kinesis Data Firehose access to your Amazon S3 bucket and AWS KMS (if you enable data encryption). For more information, see [Grant Kinesis Data Firehose Access to an Amazon Redshift Destination](https://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-rs) in the *Amazon Kinesis Data Firehose Developer Guide* .", "S3BackupConfiguration": "The configuration for backup in Amazon S3.", "S3BackupMode": "The Amazon S3 backup mode. After you create a delivery stream, you can update it to enable Amazon S3 backup if it is disabled. If backup is enabled, you can't update the delivery stream to disable it.", @@ -20557,7 +20664,7 @@ "Username": "The Amazon Redshift user that has permission to access the Amazon Redshift cluster. This user must have `INSERT` privileges for copying data from the Amazon S3 bucket to the cluster." }, "AWS::KinesisFirehose::DeliveryStream RedshiftRetryOptions": { - "DurationInSeconds": "The length of time during which Kinesis Data Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Kinesis Data Firehose does not retry if the value of `DurationInSeconds` is 0 (zero) or if the first delivery attempt takes longer than the current value." + "DurationInSeconds": "The length of time during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Firehose does not retry if the value of `DurationInSeconds` is 0 (zero) or if the first delivery attempt takes longer than the current value." }, "AWS::KinesisFirehose::DeliveryStream RetryOptions": { "DurationInSeconds": "The total amount of time that Kinesis Data Firehose spends on retries. This duration starts after the initial attempt to send data to the custom destination via HTTPS endpoint fails. It doesn't include the periods during which Kinesis Data Firehose waits for acknowledgment from the specified destination after each attempt." @@ -20576,43 +20683,43 @@ "CatalogId": "The ID of the AWS Glue Data Catalog. If you don't supply this, the AWS account ID is used by default.", "DatabaseName": "Specifies the name of the AWS Glue database that contains the schema for the output data.\n\n> If the `SchemaConfiguration` request parameter is used as part of invoking the `CreateDeliveryStream` API, then the `DatabaseName` property is required and its value must be specified.", "Region": "If you don't specify an AWS Region, the default is the current Region.", - "RoleARN": "The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. 
Cross-account roles aren't allowed.\n\n> If the `SchemaConfiguration` request parameter is used as part of invoking the `CreateDeliveryStream` API, then the `RoleARN` property is required and its value must be specified.", + "RoleARN": "The role that Firehose can use to access AWS Glue. This role must be in the same account you use for Firehose. Cross-account roles aren't allowed.\n\n> If the `SchemaConfiguration` request parameter is used as part of invoking the `CreateDeliveryStream` API, then the `RoleARN` property is required and its value must be specified.", "TableName": "Specifies the AWS Glue table that contains the column information that constitutes your data schema.\n\n> If the `SchemaConfiguration` request parameter is used as part of invoking the `CreateDeliveryStream` API, then the `TableName` property is required and its value must be specified.", - "VersionId": "Specifies the table version for the output data schema. If you don't specify this version ID, or if you set it to `LATEST` , Kinesis Data Firehose uses the most recent version. This means that any updates to the table are automatically picked up." + "VersionId": "Specifies the table version for the output data schema. If you don't specify this version ID, or if you set it to `LATEST` , Firehose uses the most recent version. This means that any updates to the table are automatically picked up." }, "AWS::KinesisFirehose::DeliveryStream Serializer": { "OrcSerDe": "A serializer to use for converting data to the ORC format before storing it in Amazon S3. For more information, see [Apache ORC](https://docs.aws.amazon.com/https://orc.apache.org/docs/) .", "ParquetSerDe": "A serializer to use for converting data to the Parquet format before storing it in Amazon S3. For more information, see [Apache Parquet](https://docs.aws.amazon.com/https://parquet.apache.org/documentation/latest/) ." }, "AWS::KinesisFirehose::DeliveryStream SnowflakeDestinationConfiguration": { - "AccountUrl": "", + "AccountUrl": "URL for accessing your Snowflake account. This URL must include your [account identifier](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/admin-account-identifier) . Note that the protocol (https://) and port number are optional.", "CloudWatchLoggingOptions": "", - "ContentColumnName": "", - "DataLoadingOption": "", - "Database": "", - "KeyPassphrase": "", - "MetaDataColumnName": "", - "PrivateKey": "", + "ContentColumnName": "The name of the record content column.", + "DataLoadingOption": "Choose to load JSON keys mapped to table column names or choose to split the JSON payload where content is mapped to a record content column and source metadata is mapped to a record metadata column.", + "Database": "All data in Snowflake is maintained in databases.", + "KeyPassphrase": "Passphrase to decrypt the private key when the key is encrypted. For information, see [Using Key Pair Authentication & Key Rotation](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/data-load-snowpipe-streaming-configuration#using-key-pair-authentication-key-rotation) .", + "MetaDataColumnName": "The name of the record metadata column.", + "PrivateKey": "The private key used to encrypt your Snowflake client. 
For information, see [Using Key Pair Authentication & Key Rotation](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/data-load-snowpipe-streaming-configuration#using-key-pair-authentication-key-rotation) .", "ProcessingConfiguration": "", - "RetryOptions": "", - "RoleARN": "", - "S3BackupMode": "", + "RetryOptions": "The time period during which Firehose retries sending data to Snowflake.", + "RoleARN": "The Amazon Resource Name (ARN) of the Snowflake role.", + "S3BackupMode": "Choose an S3 backup mode.", "S3Configuration": "", - "Schema": "", - "SnowflakeRoleConfiguration": "", - "SnowflakeVpcConfiguration": "", - "Table": "", - "User": "" + "Schema": "Each database consists of one or more schemas, which are logical groupings of database objects, such as tables and views.", + "SnowflakeRoleConfiguration": "Optionally configure a Snowflake role. Otherwise the default user role will be used.", + "SnowflakeVpcConfiguration": "The VPCE ID for Firehose to privately connect with Snowflake. The ID format is com.amazonaws.vpce.[region].vpce-svc-<[id]>. For more information, see [Amazon PrivateLink & Snowflake](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/admin-security-privatelink)", + "Table": "All data in Snowflake is stored in database tables, logically structured as collections of columns and rows.", + "User": "User login name for the Snowflake account." }, "AWS::KinesisFirehose::DeliveryStream SnowflakeRetryOptions": { - "DurationInSeconds": "" + "DurationInSeconds": "The time period during which Firehose retries sending data to Snowflake." }, "AWS::KinesisFirehose::DeliveryStream SnowflakeRoleConfiguration": { - "Enabled": "", - "SnowflakeRole": "" + "Enabled": "Enable the Snowflake role.", + "SnowflakeRole": "The Snowflake role you wish to configure." }, "AWS::KinesisFirehose::DeliveryStream SnowflakeVpcConfiguration": { - "PrivateLinkVpceId": "" + "PrivateLinkVpceId": "The VPCE ID for Firehose to privately connect with Snowflake. The ID format is com.amazonaws.vpce.[region].vpce-svc-<[id]>. For more information, see [Amazon PrivateLink & Snowflake](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/admin-security-privatelink)" }, "AWS::KinesisFirehose::DeliveryStream SplunkBufferingHints": { "IntervalInSeconds": "Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 60 (1 minute).", @@ -20621,17 +20728,17 @@ "AWS::KinesisFirehose::DeliveryStream SplunkDestinationConfiguration": { "BufferingHints": "The buffering options. If no value is specified, the default values for Splunk are used.", "CloudWatchLoggingOptions": "The Amazon CloudWatch logging options for your delivery stream.", - "HECAcknowledgmentTimeoutInSeconds": "The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.", - "HECEndpoint": "The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.", + "HECAcknowledgmentTimeoutInSeconds": "The amount of time that Firehose waits to receive an acknowledgment from Splunk after it sends it data. 
At the end of the timeout period, Firehose either tries to send the data again or considers it an error, based on your retry settings.", + "HECEndpoint": "The HTTP Event Collector (HEC) endpoint to which Firehose sends your data.", "HECEndpointType": "This type can be either `Raw` or `Event` .", "HECToken": "This is a GUID that you obtain from your Splunk cluster when you create a new HEC endpoint.", "ProcessingConfiguration": "The data processing configuration.", - "RetryOptions": "The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk, or if it doesn't receive an acknowledgment of receipt from Splunk.", - "S3BackupMode": "Defines how documents should be delivered to Amazon S3. When set to `FailedEventsOnly` , Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to `AllEvents` , Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. The default value is `FailedEventsOnly` .\n\nYou can update this backup mode from `FailedEventsOnly` to `AllEvents` . You can't update it from `AllEvents` to `FailedEventsOnly` .", + "RetryOptions": "The retry behavior in case Firehose is unable to deliver data to Splunk, or if it doesn't receive an acknowledgment of receipt from Splunk.", + "S3BackupMode": "Defines how documents should be delivered to Amazon S3. When set to `FailedEventsOnly` , Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to `AllEvents` , Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. The default value is `FailedEventsOnly` .\n\nYou can update this backup mode from `FailedEventsOnly` to `AllEvents` . You can't update it from `AllEvents` to `FailedEventsOnly` .", "S3Configuration": "The configuration for the backup Amazon S3 location." }, "AWS::KinesisFirehose::DeliveryStream SplunkRetryOptions": { - "DurationInSeconds": "The total amount of time that Kinesis Data Firehose spends on retries. This duration starts after the initial attempt to send data to Splunk fails. It doesn't include the periods during which Kinesis Data Firehose waits for acknowledgment from Splunk after each attempt." + "DurationInSeconds": "The total amount of time that Firehose spends on retries. This duration starts after the initial attempt to send data to Splunk fails. It doesn't include the periods during which Firehose waits for acknowledgment from Splunk after each attempt." }, "AWS::KinesisFirehose::DeliveryStream Tag": { "Key": "A unique identifier for the tag. Maximum length: 128 characters. Valid characters: Unicode letters, digits, white space, _ . / = + - % @", @@ -20886,7 +20993,7 @@ "OnSuccess": "The destination configuration for successful invocations." 
}, "AWS::Lambda::EventInvokeConfig OnFailure": { - "Destination": "The Amazon Resource Name (ARN) of the destination resource.\n\nTo retain records of [asynchronous invocations](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) , you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.\n\nTo retain records of failed invocations from [Kinesis and DynamoDB event sources](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html#event-source-mapping-destinations) , you can configure an Amazon SNS topic or Amazon SQS queue as the destination.\n\nTo retain records of failed invocations from [self-managed Kafka](https://docs.aws.amazon.com/lambda/latest/dg/with-kafka.html#services-smaa-onfailure-destination) or [Amazon MSK](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-onfailure-destination) , you can configure an Amazon SNS topic or Amazon SQS queue as the destination." + "Destination": "The Amazon Resource Name (ARN) of the destination resource.\n\nTo retain records of [asynchronous invocations](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) , you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.\n\nTo retain records of failed invocations from [Kinesis and DynamoDB event sources](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html#event-source-mapping-destinations) , you can configure an Amazon SNS topic or Amazon SQS queue as the destination.\n\nTo retain records of failed invocations from [self-managed Kafka](https://docs.aws.amazon.com/lambda/latest/dg/with-kafka.html#services-smaa-onfailure-destination) or [Amazon MSK](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-onfailure-destination) , you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination." }, "AWS::Lambda::EventInvokeConfig OnSuccess": { "Destination": "The Amazon Resource Name (ARN) of the destination resource." @@ -20895,7 +21002,7 @@ "AmazonManagedKafkaEventSourceConfig": "Specific configuration settings for an Amazon Managed Streaming for Apache Kafka (Amazon MSK) event source.", "BatchSize": "The maximum number of records in each batch that Lambda pulls from your stream or queue and sends to your function. Lambda passes all of the records in the batch to the function in a single call, up to the payload limit for synchronous invocation (6 MB).\n\n- *Amazon Kinesis* \u2013 Default 100. Max 10,000.\n- *Amazon DynamoDB Streams* \u2013 Default 100. Max 10,000.\n- *Amazon Simple Queue Service* \u2013 Default 10. For standard queues the max is 10,000. For FIFO queues the max is 10.\n- *Amazon Managed Streaming for Apache Kafka* \u2013 Default 100. Max 10,000.\n- *Self-managed Apache Kafka* \u2013 Default 100. Max 10,000.\n- *Amazon MQ (ActiveMQ and RabbitMQ)* \u2013 Default 100. Max 10,000.\n- *DocumentDB* \u2013 Default 100. Max 10,000.", "BisectBatchOnFunctionError": "(Kinesis and DynamoDB Streams only) If the function returns an error, split the batch in two and retry. 
The default value is false.", - "DestinationConfig": "(Kinesis and DynamoDB Streams only) An Amazon SQS queue or Amazon SNS topic destination for discarded records.", + "DestinationConfig": "(Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Apache Kafka event sources only) A configuration object that specifies the destination of an event after Lambda processes it.", "DocumentDBEventSourceConfig": "Specific configuration settings for a DocumentDB event source.", "Enabled": "When true, the event source mapping is active. When false, Lambda pauses polling and invocation.\n\nDefault: True", "EventSourceArn": "The Amazon Resource Name (ARN) of the event source.\n\n- *Amazon Kinesis* \u2013 The ARN of the data stream or a stream consumer.\n- *Amazon DynamoDB Streams* \u2013 The ARN of the stream.\n- *Amazon Simple Queue Service* \u2013 The ARN of the queue.\n- *Amazon Managed Streaming for Apache Kafka* \u2013 The ARN of the cluster or the ARN of the VPC connection (for [cross-account event source mappings](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#msk-multi-vpc) ).\n- *Amazon MQ* \u2013 The ARN of the broker.\n- *Amazon DocumentDB* \u2013 The ARN of the DocumentDB change stream.", @@ -20937,7 +21044,7 @@ "Filters": "A list of filters." }, "AWS::Lambda::EventSourceMapping OnFailure": { - "Destination": "The Amazon Resource Name (ARN) of the destination resource.\n\nTo retain records of [asynchronous invocations](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) , you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.\n\nTo retain records of failed invocations from [Kinesis and DynamoDB event sources](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html#event-source-mapping-destinations) , you can configure an Amazon SNS topic or Amazon SQS queue as the destination.\n\nTo retain records of failed invocations from [self-managed Kafka](https://docs.aws.amazon.com/lambda/latest/dg/with-kafka.html#services-smaa-onfailure-destination) or [Amazon MSK](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-onfailure-destination) , you can configure an Amazon SNS topic or Amazon SQS queue as the destination." + "Destination": "The Amazon Resource Name (ARN) of the destination resource.\n\nTo retain records of [asynchronous invocations](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) , you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.\n\nTo retain records of failed invocations from [Kinesis and DynamoDB event sources](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html#event-source-mapping-destinations) , you can configure an Amazon SNS topic or Amazon SQS queue as the destination.\n\nTo retain records of failed invocations from [self-managed Kafka](https://docs.aws.amazon.com/lambda/latest/dg/with-kafka.html#services-smaa-onfailure-destination) or [Amazon MSK](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-onfailure-destination) , you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination." }, "AWS::Lambda::EventSourceMapping ScalingConfig": { "MaximumConcurrency": "Limits the number of concurrent instances that the Amazon SQS event source can invoke." 
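To make the updated `DestinationConfig` and `OnFailure` descriptions above concrete, here is a minimal CloudFormation sketch in JSON (the logical ID, function name, MSK cluster ARN, topic, and bucket ARN are invented placeholders, not values taken from this schema) showing an Amazon MSK event source mapping that retains records of failed invocations in an Amazon S3 bucket, one of the destinations now listed:

{
  "FailedRecordsMapping": {
    "Type": "AWS::Lambda::EventSourceMapping",
    "Properties": {
      "FunctionName": "example-consumer-function",
      "EventSourceArn": "arn:aws:kafka:us-east-1:111122223333:cluster/example-cluster/11111111-2222-3333-4444-555555555555-1",
      "Topics": ["example-topic"],
      "StartingPosition": "LATEST",
      "DestinationConfig": {
        "OnFailure": {
          "Destination": "arn:aws:s3:::example-failed-records-bucket"
        }
      }
    }
  }
}

The same `OnFailure.Destination` shape is used for the Amazon SNS and Amazon SQS destinations described above; only the ARN changes.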
@@ -21972,6 +22079,7 @@ "Tags": "Applies one or more tags to the map resource. A tag is a key-value pair helps manage, identify, search, and filter your resources by labelling them.\n\nFormat: `\"key\" : \"value\"`\n\nRestrictions:\n\n- Maximum 50 tags per resource\n- Each resource tag must be unique with a maximum of one value.\n- Maximum key length: 128 Unicode characters in UTF-8\n- Maximum value length: 256 Unicode characters in UTF-8\n- Can use alphanumeric characters (A\u2013Z, a\u2013z, 0\u20139), and the following characters: + - = . _ : / @.\n- Cannot use \"aws:\" as a prefix for a key." }, "AWS::Location::Map MapConfiguration": { + "CustomLayers": "Specifies the custom layers for the style. Leave unset to not enable any custom layer, or, for styles that support custom layers, you can enable layer(s), such as the `POI` layer for the VectorEsriNavigation style.\n\n> Currently only `VectorEsriNavigation` supports CustomLayers. For more information, see [Custom Layers](https://docs.aws.amazon.com//location/latest/developerguide/map-concepts.html#map-custom-layers) .", "PoliticalView": "Specifies the map political view selected from an available data provider.", "Style": "Specifies the map style selected from an available data provider.\n\nValid [Esri map styles](https://docs.aws.amazon.com/location/latest/developerguide/esri.html) :\n\n- `VectorEsriNavigation` \u2013 The Esri Navigation map style, which provides a detailed basemap for the world symbolized with a custom navigation map style that's designed for use during the day in mobile devices. It also includes a richer set of places, such as shops, services, restaurants, attractions, and other points of interest. Enable the `POI` layer by setting it in CustomLayers to leverage the additional places data.\n- `RasterEsriImagery` \u2013 The Esri Imagery map style. A raster basemap that provides one meter or better satellite and aerial imagery in many parts of the world and lower resolution satellite imagery worldwide.\n- `VectorEsriLightGrayCanvas` \u2013 The Esri Light Gray Canvas map style, which provides a detailed vector basemap with a light gray, neutral background style with minimal colors, labels, and features that's designed to draw attention to your thematic content.\n- `VectorEsriTopographic` \u2013 The Esri Light map style, which provides a detailed vector basemap with a classic Esri map style.\n- `VectorEsriStreets` \u2013 The Esri Street Map style, which provides a detailed vector basemap for the world symbolized with a classic Esri street map style. The vector tile layer is similar in content and style to the World Street Map raster map.\n- `VectorEsriDarkGrayCanvas` \u2013 The Esri Dark Gray Canvas map style. A vector basemap with a dark gray, neutral background with minimal colors, labels, and features that's designed to draw attention to your thematic content.\n\nValid [HERE Technologies map styles](https://docs.aws.amazon.com/location/latest/developerguide/HERE.html) :\n\n- `VectorHereExplore` \u2013 A default HERE map style containing a neutral, global map and its features including roads, buildings, landmarks, and water features. It also now includes a fully designed map of Japan.\n- `RasterHereExploreSatellite` \u2013 A global map containing high resolution satellite imagery.\n- `HybridHereExploreSatellite` \u2013 A global map displaying the road network, street names, and city labels over satellite imagery. 
This style will automatically retrieve both raster and vector tiles, and your charges will be based on total tiles retrieved.\n\n> Hybrid styles use both vector and raster tiles when rendering the map that you see. This means that more tiles are retrieved than when using either vector or raster tiles alone. Your charges will include all tiles retrieved.\n- `VectorHereContrast` \u2013 The HERE Contrast (Berlin) map style is a high contrast detailed base map of the world that blends 3D and 2D rendering.\n\n> The `VectorHereContrast` style has been renamed from `VectorHereBerlin` . `VectorHereBerlin` has been deprecated, but will continue to work in applications that use it.\n- `VectorHereExploreTruck` \u2013 A global map containing truck restrictions and attributes (e.g. width / height / HAZMAT) symbolized with highlighted segments and icons on top of HERE Explore to support use cases within transport and logistics.\n\nValid [GrabMaps map styles](https://docs.aws.amazon.com/location/latest/developerguide/grab.html) :\n\n- `VectorGrabStandardLight` \u2013 The Grab Standard Light map style provides a basemap with detailed land use coloring, area names, roads, landmarks, and points of interest covering Southeast Asia.\n- `VectorGrabStandardDark` \u2013 The Grab Standard Dark map style provides a dark variation of the standard basemap covering Southeast Asia.\n\n> Grab provides maps only for countries in Southeast Asia, and is only available in the Asia Pacific (Singapore) Region ( `ap-southeast-1` ). For more information, see [GrabMaps countries and area covered](https://docs.aws.amazon.com/location/latest/developerguide/grab.html#grab-coverage-area) . \n\nValid [Open Data map styles](https://docs.aws.amazon.com/location/latest/developerguide/open-data.html) :\n\n- `VectorOpenDataStandardLight` \u2013 The Open Data Standard Light map style provides a detailed basemap for the world suitable for website and mobile application use. The map includes highways major roads, minor roads, railways, water features, cities, parks, landmarks, building footprints, and administrative boundaries.\n- `VectorOpenDataStandardDark` \u2013 Open Data Standard Dark is a dark-themed map style that provides a detailed basemap for the world suitable for website and mobile application use. The map includes highways major roads, minor roads, railways, water features, cities, parks, landmarks, building footprints, and administrative boundaries.\n- `VectorOpenDataVisualizationLight` \u2013 The Open Data Visualization Light map style is a light-themed style with muted colors and fewer features that aids in understanding overlaid data.\n- `VectorOpenDataVisualizationDark` \u2013 The Open Data Visualization Dark map style is a dark-themed style with muted colors and fewer features that aids in understanding overlaid data." }, @@ -22109,7 +22217,7 @@ }, "AWS::Logs::QueryDefinition": { "LogGroupNames": "Use this parameter if you want the query to query only certain log groups.", - "Name": "A name for the query definition.", + "Name": "A name for the query definition.\n\n> You can use the name to create a folder structure for your queries. To create a folder, use a forward slash (/) to prefix your desired query name with your desired folder name. For example, `/ *folder-name* / *query-name*` .", "QueryString": "The query string to use for this query definition. For more information, see [CloudWatch Logs Insights Query Syntax](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html) ." 
}, "AWS::Logs::ResourcePolicy": { @@ -25362,8 +25470,8 @@ "Script": "The initialization script." }, "AWS::OSIS::Pipeline": { - "BufferOptions": "Options that specify the configuration of a persistent buffer. To configure how OpenSearch Ingestion encrypts this data, set the EncryptionAtRestOptions.", - "EncryptionAtRestOptions": "Options to control how OpenSearch encrypts all data-at-rest.", + "BufferOptions": "Options that specify the configuration of a persistent buffer. To configure how OpenSearch Ingestion encrypts this data, set the `EncryptionAtRestOptions` . For more information, see [Persistent buffering](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/osis-features-overview.html#persistent-buffering) .", + "EncryptionAtRestOptions": "Options to control how OpenSearch encrypts buffer data.", "LogPublishingOptions": "Key-value pairs that represent log publishing settings.", "MaxUnits": "The maximum pipeline capacity, in Ingestion Compute Units (ICUs).", "MinUnits": "The minimum pipeline capacity, in Ingestion Compute Units (ICUs).", @@ -25376,10 +25484,10 @@ "PersistentBufferEnabled": "Whether persistent buffering should be enabled." }, "AWS::OSIS::Pipeline CloudWatchLogDestination": { - "LogGroup": "The name of the CloudWatch Logs group to send pipeline logs to. You can specify an existing log group or create a new one. For example, `/aws/OpenSearchService/IngestionService/my-pipeline` ." + "LogGroup": "The name of the CloudWatch Logs group to send pipeline logs to. You can specify an existing log group or create a new one. For example, `/aws/vendedlogs/OpenSearchService/pipelines` ." }, "AWS::OSIS::Pipeline EncryptionAtRestOptions": { - "KmsKeyArn": "The ARN of the KMS key used to encrypt data-at-rest in OpenSearch Ingestion. By default, data is encrypted using an AWS owned key." + "KmsKeyArn": "The ARN of the KMS key used to encrypt buffer data. By default, data is encrypted using an AWS owned key." }, "AWS::OSIS::Pipeline LogPublishingOptions": { "CloudWatchLogDestination": "The destination for OpenSearch Ingestion logs sent to Amazon CloudWatch Logs. This parameter is required if `IsLoggingEnabled` is set to `true` .", @@ -25500,7 +25608,7 @@ "AWS::OpenSearchServerless::Collection": { "Description": "A description of the collection.", "Name": "The name of the collection.\n\nCollection names must meet the following criteria:\n\n- Starts with a lowercase letter\n- Unique to your account and AWS Region\n- Contains between 3 and 28 characters\n- Contains only lowercase letters a-z, the numbers 0-9, and the hyphen (-)", - "StandbyReplicas": "Details about an OpenSearch Serverless collection.", + "StandbyReplicas": "Indicates whether standby replicas should be used for a collection.", "Tags": "An arbitrary set of tags (key\u2013value pairs) to associate with the collection.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .", "Type": "The type of collection. Possible values are `SEARCH` , `TIMESERIES` , and `VECTORSEARCH` . For more information, see [Choosing a collection type](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/serverless-overview.html#serverless-usecase) ." 
}, @@ -26574,7 +26682,7 @@ }, "AWS::Pinpoint::EventStream": { "ApplicationId": "The unique identifier for the Amazon Pinpoint application that you want to export data from.", - "DestinationStreamArn": "The Amazon Resource Name (ARN) of the Amazon Kinesis data stream or Amazon Kinesis Data Firehose delivery stream that you want to publish event data to.\n\nFor a Kinesis data stream, the ARN format is: `arn:aws:kinesis: region : account-id :stream/ stream_name`\n\nFor a Kinesis Data Firehose delivery stream, the ARN format is: `arn:aws:firehose: region : account-id :deliverystream/ stream_name`", + "DestinationStreamArn": "The Amazon Resource Name (ARN) of the Amazon Kinesis Data Stream or Amazon Kinesis Data Firehose delivery stream that you want to publish event data to.\n\nFor a Kinesis Data Stream, the ARN format is: `arn:aws:kinesis: region : account-id :stream/ stream_name`\n\nFor a Kinesis Data Firehose delivery stream, the ARN format is: `arn:aws:firehose: region : account-id :deliverystream/ stream_name`", "RoleArn": "The AWS Identity and Access Management (IAM) role that authorizes Amazon Pinpoint to publish event data to the stream in your AWS account." }, "AWS::Pinpoint::GCMChannel": { @@ -35592,8 +35700,8 @@ "ReplicationSourceIdentifier": "The Amazon Resource Name (ARN) of the source DB instance or DB cluster if this DB cluster is created as a read replica.\n\nValid for: Aurora DB clusters only", "RestoreToTime": "The date and time to restore the DB cluster to.\n\nValid Values: Value must be a time in Universal Coordinated Time (UTC) format\n\nConstraints:\n\n- Must be before the latest restorable time for the DB instance\n- Must be specified if `UseLatestRestorableTime` parameter isn't provided\n- Can't be specified if the `UseLatestRestorableTime` parameter is enabled\n- Can't be specified if the `RestoreType` parameter is `copy-on-write`\n\nThis property must be used with `SourceDBClusterIdentifier` property. The resulting cluster will have the identifier that matches the value of the `DBclusterIdentifier` property.\n\nExample: `2015-03-07T23:45:00Z`\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "RestoreType": "The type of restore to be performed. You can specify one of the following values:\n\n- `full-copy` - The new DB cluster is restored as a full copy of the source DB cluster.\n- `copy-on-write` - The new DB cluster is restored as a clone of the source DB cluster.\n\nIf you don't specify a `RestoreType` value, then the new DB cluster is restored as a full copy of the source DB cluster.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", - "ScalingConfiguration": "The `ScalingConfiguration` property type specifies the scaling configuration of an Aurora Serverless DB cluster.\n\nThis property is only supported for Aurora Serverless v1. For Aurora Serverless v2, use `ServerlessV2ScalingConfiguration` property.\n\nValid for: Aurora DB clusters only", - "ServerlessV2ScalingConfiguration": "The `ServerlessV2ScalingConfiguration` property type specifies the scaling configuration of an Aurora Serverless V2 DB cluster.\n\nThis property is only supported for Aurora Serverless v2. For Aurora Serverless v1, use `ScalingConfiguration` property.\n\nValid for: Aurora DB clusters only", + "ScalingConfiguration": "The `ScalingConfiguration` property type specifies the scaling configuration of an Aurora Serverless DB cluster.\n\nThis property is only supported for Aurora Serverless v1. 
For Aurora Serverless v2, use the `ServerlessV2ScalingConfiguration` property.\n\nValid for: Aurora DB clusters only", + "ServerlessV2ScalingConfiguration": "The `ServerlessV2ScalingConfiguration` property type specifies the scaling configuration of an Aurora Serverless V2 DB cluster.\n\nThis property is only supported for Aurora Serverless v2. For Aurora Serverless v1, use the `ScalingConfiguration` property.\n\nValid for: Aurora DB clusters only", "SnapshotIdentifier": "The identifier for the DB snapshot or DB cluster snapshot to restore from.\n\nYou can use either the name or the Amazon Resource Name (ARN) to specify a DB cluster snapshot. However, you can use only the ARN to specify a DB snapshot.\n\nAfter you restore a DB cluster with a `SnapshotIdentifier` property, you must specify the same `SnapshotIdentifier` property for any future updates to the DB cluster. When you specify this property for an update, the DB cluster is not restored from the snapshot again, and the data in the database is not changed. However, if you don't specify the `SnapshotIdentifier` property, an empty DB cluster is created, and the original DB cluster is deleted. If you specify a property that is different from the previous snapshot restore property, a new DB cluster is restored from the specified `SnapshotIdentifier` property, and the original DB cluster is deleted.\n\nIf you specify the `SnapshotIdentifier` property to restore a DB cluster (as opposed to specifying it for DB cluster updates), then don't specify the following properties:\n\n- `GlobalClusterIdentifier`\n- `MasterUsername`\n- `MasterUserPassword`\n- `ReplicationSourceIdentifier`\n- `RestoreType`\n- `SourceDBClusterIdentifier`\n- `SourceRegion`\n- `StorageEncrypted` (for an encrypted snapshot)\n- `UseLatestRestorableTime`\n\nConstraints:\n\n- Must match the identifier of an existing Snapshot.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "SourceDBClusterIdentifier": "When restoring a DB cluster to a point in time, the identifier of the source DB cluster from which to restore.\n\nConstraints:\n\n- Must match the identifier of an existing DBCluster.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "SourceRegion": "The AWS Region which contains the source DB cluster when replicating a DB cluster. For example, `us-east-1` .\n\nValid for: Aurora DB clusters only", @@ -35861,6 +35969,18 @@ "SourceDBClusterIdentifier": "The Amazon Resource Name (ARN) to use as the primary cluster of the global database.\n\nIf you provide a value for this parameter, don't specify values for the following settings because Amazon Aurora uses the values from the specified source DB cluster:\n\n- `DatabaseName`\n- `Engine`\n- `EngineVersion`\n- `StorageEncrypted`", "StorageEncrypted": "Specifies whether to enable storage encryption for the new global database cluster.\n\nConstraints:\n\n- Can't be specified if `SourceDBClusterIdentifier` is specified. In this case, Amazon Aurora uses the setting from the source DB cluster." }, + "AWS::RDS::Integration": { + "AdditionalEncryptionContext": "An optional set of non-secret key\u2013value pairs that contains additional contextual information about the data. 
For more information, see [Encryption context](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) in the *AWS Key Management Service Developer Guide* .\n\nYou can only include this parameter if you specify the `KMSKeyId` parameter.", + "IntegrationName": "The name of the integration.", + "KMSKeyId": "The AWS Key Management Service ( AWS KMS) key identifier for the key to use to encrypt the integration. If you don't specify an encryption key, RDS uses a default AWS owned key.", + "SourceArn": "The Amazon Resource Name (ARN) of the database to use as the source for replication.", + "Tags": "A list of tags. For more information, see [Tagging Amazon RDS Resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide* .", + "TargetArn": "The ARN of the Redshift data warehouse to use as the target for replication." + }, + "AWS::RDS::Integration Tag": { + "Key": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", + "Value": "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\")." + }, "AWS::RDS::OptionGroup": { "EngineName": "Specifies the name of the engine that this option group should be associated with.\n\nValid Values:\n\n- `mariadb`\n- `mysql`\n- `oracle-ee`\n- `oracle-ee-cdb`\n- `oracle-se2`\n- `oracle-se2-cdb`\n- `postgres`\n- `sqlserver-ee`\n- `sqlserver-se`\n- `sqlserver-ex`\n- `sqlserver-web`", "MajorEngineVersion": "Specifies the major version of the engine that this option group should be associated with.", @@ -36101,6 +36221,7 @@ "ResumeCluster": "An action that runs a `ResumeCluster` API operation." }, "AWS::RedshiftServerless::Namespace": { + "AdminPasswordSecretKmsKeyId": "The ID of the AWS Key Management Service (KMS) key used to encrypt and store the namespace's admin credentials secret.", "AdminUserPassword": "The password of the administrator for the primary database created in the namespace.", "AdminUsername": "The username of the administrator for the primary database created in the namespace.", "DbName": "The name of the primary database created in the namespace.", @@ -36110,10 +36231,15 @@ "IamRoles": "A list of IAM roles to associate with the namespace.", "KmsKeyId": "The ID of the AWS Key Management Service key used to encrypt your data.", "LogExports": "The types of logs the namespace can export. Available export types are `userlog` , `connectionlog` , and `useractivitylog` .", + "ManageAdminPassword": "If true, Amazon Redshift uses AWS Secrets Manager to manage the namespace's admin credentials.", "NamespaceName": "The name of the namespace. Must be between 3-64 alphanumeric characters in lowercase, and it cannot be a reserved word. A list of reserved words can be found in [Reserved Words](https://docs.aws.amazon.com//redshift/latest/dg/r_pg_keywords.html) in the Amazon Redshift Database Developer Guide.", + "NamespaceResourcePolicy": "The resource policy object. 
Currently, you can use policies to share snapshots across AWS accounts.", + "RedshiftIdcApplicationArn": "The ARN for the Redshift application that integrates with IAM Identity Center.", "Tags": "The map of the key-value pairs used to tag the namespace." }, "AWS::RedshiftServerless::Namespace Namespace": { + "AdminPasswordSecretArn": "The Amazon Resource Name (ARN) for the namespace's admin user credentials secret.", + "AdminPasswordSecretKmsKeyId": "The ID of the AWS Key Management Service (KMS) key used to encrypt and store the namespace's admin credentials secret.", "AdminUsername": "The username of the administrator for the first database created in the namespace.", "CreationDate": "The date of when the namespace was created.", "DbName": "The name of the first database created in the namespace.", @@ -38249,16 +38375,29 @@ }, "AWS::SageMaker::AppImageConfig": { "AppImageConfigName": "The name of the AppImageConfig. Must be unique to your account.", + "JupyterLabAppImageConfig": "The configuration for the file system and the runtime, such as the environment variables and entry point.", "KernelGatewayImageConfig": "The configuration for the file system and kernels in the SageMaker image.", "Tags": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) ." }, + "AWS::SageMaker::AppImageConfig ContainerConfig": { + "ContainerArguments": "The arguments for the container when you're running the application.", + "ContainerEntrypoint": "The entrypoint used to run the application in the container.", + "ContainerEnvironmentVariables": "The environment variables to set in the container." + }, + "AWS::SageMaker::AppImageConfig CustomImageContainerEnvironmentVariable": { + "Key": "The key that identifies a container environment variable.", + "Value": "The value of the container environment variable." + }, "AWS::SageMaker::AppImageConfig FileSystemConfig": { "DefaultGid": "The default POSIX group ID (GID). If not specified, defaults to `100` .", "DefaultUid": "The default POSIX user ID (UID). If not specified, defaults to `1000` .", "MountPath": "The path within the image to mount the user's EFS home directory. The directory should be empty. If not specified, defaults to */home/sagemaker-user* ." }, + "AWS::SageMaker::AppImageConfig JupyterLabAppImageConfig": { + "ContainerConfig": "The configuration used to run the application image container." + }, "AWS::SageMaker::AppImageConfig KernelGatewayImageConfig": { - "FileSystemConfig": "The Amazon Elastic File System (EFS) storage configuration for a SageMaker image.", + "FileSystemConfig": "The Amazon Elastic File System storage configuration for a SageMaker image.", "KernelSpecs": "The specification of the Jupyter kernels in the image." }, "AWS::SageMaker::AppImageConfig KernelSpec": { @@ -38415,7 +38554,7 @@ "AppNetworkAccessType": "Specifies the VPC used for non-EFS traffic. The default value is `PublicInternetOnly` .\n\n- `PublicInternetOnly` - Non-EFS traffic is through a VPC managed by Amazon SageMaker , which allows direct internet access\n- `VpcOnly` - All Studio traffic is through the specified VPC and subnets\n\n*Valid Values* : `PublicInternetOnly | VpcOnly`", "AppSecurityGroupManagement": "The entity that creates and manages the required security groups for inter-app communication in `VpcOnly` mode. 
Required when `CreateDomain.AppNetworkAccessType` is `VpcOnly` and `DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn` is provided. If setting up the domain for use with RStudio, this value must be set to `Service` .\n\n*Allowed Values* : `Service` | `Customer`", "AuthMode": "The mode of authentication that members use to access the Domain.\n\n*Valid Values* : `SSO | IAM`", - "DefaultSpaceSettings": "A collection of settings that apply to spaces created in the Domain.", + "DefaultSpaceSettings": "A collection of settings that apply to spaces created in the domain.", "DefaultUserSettings": "The default user settings.", "DomainName": "The domain name.", "DomainSettings": "A collection of settings that apply to the `SageMaker Domain` . These settings are specified through the `CreateDomain` API call.", @@ -38425,7 +38564,7 @@ "VpcId": "The ID of the Amazon Virtual Private Cloud (Amazon VPC) that Studio uses for communication.\n\n*Length Constraints* : Maximum length of 32.\n\n*Pattern* : `[-0-9a-zA-Z]+`" }, "AWS::SageMaker::Domain CodeEditorAppSettings": { - "DefaultResourceSpec": "", + "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the Code Editor app.", "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the Code Editor application lifecycle configuration." }, "AWS::SageMaker::Domain CodeRepository": { @@ -38451,12 +38590,17 @@ "ExecutionRole": "The ARN of the execution role for the space.", "JupyterServerAppSettings": "The JupyterServer app settings.", "KernelGatewayAppSettings": "The KernelGateway app settings.", - "SecurityGroups": "The security group IDs for the Amazon Virtual Private Cloud that the space uses for communication." + "SecurityGroups": "The security group IDs for the Amazon VPC that the space uses for communication." }, "AWS::SageMaker::Domain DefaultSpaceStorageSettings": { "DefaultEbsStorageSettings": "The default EBS storage settings for a private space." }, + "AWS::SageMaker::Domain DockerSettings": { + "EnableDockerAccess": "Indicates whether the domain can access Docker.", + "VpcOnlyTrustedAccounts": "The list of AWS accounts that are trusted when the domain is created in VPC-only mode." + }, "AWS::SageMaker::Domain DomainSettings": { + "DockerSettings": "A collection of settings that configure the domain's Docker interaction.", "RStudioServerProDomainSettings": "A collection of settings that configure the `RStudioServerPro` Domain-level app.", "SecurityGroupIds": "The security groups for the Amazon Virtual Private Cloud that the `Domain` uses for communication between Domain-level apps and user apps." }, @@ -38467,7 +38611,7 @@ "AWS::SageMaker::Domain JupyterLabAppSettings": { "CodeRepositories": "A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterLab application.", "CustomImages": "A list of custom SageMaker images that are configured to run as a JupyterLab app.", - "DefaultResourceSpec": "", + "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterLab app.", "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the lifecycle configurations attached to the user profile or domain. To remove a lifecycle config, you must set `LifecycleConfigArns` to an empty list." 
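For orientation only, here is a minimal, hypothetical CloudFormation fragment showing how the new `DomainSettings.DockerSettings` and `JupyterLabAppSettings.DefaultResourceSpec` properties described above fit together. The VPC, subnet, role, and account values are placeholders, and `EnableDockerAccess` is assumed to take the `ENABLED` / `DISABLED` strings used elsewhere in SageMaker; this is a sketch, not part of the schema change itself:

{
  "StudioDomain": {
    "Type": "AWS::SageMaker::Domain",
    "Properties": {
      "DomainName": "example-domain",
      "AuthMode": "IAM",
      "VpcId": "vpc-0123456789abcdef0",
      "SubnetIds": ["subnet-0123456789abcdef0"],
      "DefaultUserSettings": {
        "ExecutionRole": "arn:aws:iam::111122223333:role/ExampleSageMakerRole",
        "JupyterLabAppSettings": {
          "DefaultResourceSpec": { "InstanceType": "ml.t3.medium" }
        }
      },
      "DomainSettings": {
        "DockerSettings": {
          "EnableDockerAccess": "ENABLED",
          "VpcOnlyTrustedAccounts": ["444455556666"]
        }
      }
    }
  }
}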
}, "AWS::SageMaker::Domain JupyterServerAppSettings": { @@ -38475,7 +38619,7 @@ }, "AWS::SageMaker::Domain KernelGatewayAppSettings": { "CustomImages": "A list of custom SageMaker images that are configured to run as a KernelGateway app.", - "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS Command Line Interface or AWS CloudFormation and the instance type parameter value is not passed." + "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed." }, "AWS::SageMaker::Domain RSessionAppSettings": { "CustomImages": "A list of custom SageMaker images that are configured to run as a RSession app.", @@ -39221,7 +39365,7 @@ }, "AWS::SageMaker::ModelExplainabilityJobDefinition ModelExplainabilityJobInput": { "BatchTransformInput": "Input object for the batch transform job.", - "EndpointInput": "" + "EndpointInput": "Input object for the endpoint" }, "AWS::SageMaker::ModelExplainabilityJobDefinition MonitoringOutput": { "S3Output": "The Amazon S3 storage location where the results of a monitoring job are saved." @@ -39759,31 +39903,67 @@ "Value": "The tag value." }, "AWS::SageMaker::Space": { - "DomainId": "The ID of the associated Domain.", + "DomainId": "The ID of the associated domain.", + "OwnershipSettings": "The collection of ownership settings for a space.", + "SpaceDisplayName": "The name of the space that appears in the Studio UI.", "SpaceName": "The name of the space.", "SpaceSettings": "A collection of space settings.", + "SpaceSharingSettings": "A collection of space sharing settings.", "Tags": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) ." }, + "AWS::SageMaker::Space CodeRepository": { + "RepositoryUrl": "The URL of the Git repository." + }, + "AWS::SageMaker::Space CustomFileSystem": { + "EFSFileSystem": "A custom file system in Amazon EFS." + }, "AWS::SageMaker::Space CustomImage": { "AppImageConfigName": "The name of the AppImageConfig.", "ImageName": "The name of the CustomImage. Must be unique to your account.", "ImageVersionNumber": "The version number of the CustomImage." }, + "AWS::SageMaker::Space EFSFileSystem": { + "FileSystemId": "The ID of your Amazon EFS file system." + }, + "AWS::SageMaker::Space EbsStorageSettings": { + "EbsVolumeSizeInGb": "The size of an EBS storage volume for a private space." + }, "AWS::SageMaker::Space JupyterServerAppSettings": { "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterServer app. If you use the `LifecycleConfigArns` parameter, then this parameter is also required." 
}, "AWS::SageMaker::Space KernelGatewayAppSettings": { "CustomImages": "A list of custom SageMaker images that are configured to run as a KernelGateway app.", - "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS Command Line Interface or AWS CloudFormation and the instance type parameter value is not passed." + "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed." + }, + "AWS::SageMaker::Space OwnershipSettings": { + "OwnerUserProfileName": "The user profile who is the owner of the private space." }, "AWS::SageMaker::Space ResourceSpec": { "InstanceType": "The instance type that the image version runs on.\n\n> *JupyterServer apps* only support the `system` value.\n> \n> For *KernelGateway apps* , the `system` value is translated to `ml.t3.medium` . KernelGateway apps also support all other values for available instance types.", "SageMakerImageArn": "The ARN of the SageMaker image that the image version belongs to.", "SageMakerImageVersionArn": "The ARN of the image version created on the instance." }, + "AWS::SageMaker::Space SpaceCodeEditorAppSettings": { + "DefaultResourceSpec": "Specifies the ARNs of a SageMaker image and SageMaker image version, and the instance type that the version runs on." + }, + "AWS::SageMaker::Space SpaceJupyterLabAppSettings": { + "CodeRepositories": "A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterLab application.", + "DefaultResourceSpec": "Specifies the ARNs of a SageMaker image and SageMaker image version, and the instance type that the version runs on." + }, "AWS::SageMaker::Space SpaceSettings": { + "AppType": "The type of app created within the space.", + "CodeEditorAppSettings": "The Code Editor application settings.", + "CustomFileSystems": "A file system, created by you, that you assign to a space for an Amazon SageMaker Domain. Permitted users can access this file system in Amazon SageMaker Studio.", + "JupyterLabAppSettings": "The settings for the JupyterLab application.", "JupyterServerAppSettings": "The JupyterServer app settings.", - "KernelGatewayAppSettings": "The KernelGateway app settings." + "KernelGatewayAppSettings": "The KernelGateway app settings.", + "SpaceStorageSettings": "The storage settings for a private space." + }, + "AWS::SageMaker::Space SpaceSharingSettings": { + "SharingType": "Specifies the sharing type of the space." + }, + "AWS::SageMaker::Space SpaceStorageSettings": { + "EbsStorageSettings": "A collection of EBS storage settings for a private space." }, "AWS::SageMaker::Space Tag": { "Key": "The tag key. Tag keys must be unique per resource.", @@ -39798,7 +39978,7 @@ "UserSettings": "A collection of settings that apply to users of Amazon SageMaker Studio." 
}, "AWS::SageMaker::UserProfile CodeEditorAppSettings": { - "DefaultResourceSpec": "", + "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the Code Editor app.", "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the Code Editor application lifecycle configuration." }, "AWS::SageMaker::UserProfile CodeRepository": { @@ -39830,7 +40010,7 @@ "AWS::SageMaker::UserProfile JupyterLabAppSettings": { "CodeRepositories": "A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterLab application.", "CustomImages": "A list of custom SageMaker images that are configured to run as a JupyterLab app.", - "DefaultResourceSpec": "", + "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterLab app.", "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the lifecycle configurations attached to the user profile or domain. To remove a lifecycle config, you must set `LifecycleConfigArns` to an empty list." }, "AWS::SageMaker::UserProfile JupyterServerAppSettings": { @@ -39838,7 +40018,7 @@ }, "AWS::SageMaker::UserProfile KernelGatewayAppSettings": { "CustomImages": "A list of custom SageMaker images that are configured to run as a KernelGateway app.", - "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS Command Line Interface or AWS CloudFormation and the instance type parameter value is not passed." + "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed." }, "AWS::SageMaker::UserProfile RStudioServerProAppSettings": { "AccessStatus": "Indicates whether the current user has access to the `RStudioServerPro` app.", @@ -39878,7 +40058,7 @@ "MemberDefinitions": "A list of `MemberDefinition` objects that contains objects that identify the workers that make up the work team.\n\nWorkforces can be created using Amazon Cognito or your own OIDC Identity Provider (IdP). For private workforces created using Amazon Cognito use `CognitoMemberDefinition` . For workforces created using your own OIDC identity provider (IdP) use `OidcMemberDefinition` .", "NotificationConfiguration": "Configures SNS notifications of available or expiring work items for work teams.", "Tags": "An array of key-value pairs.", - "WorkforceName": "", + "WorkforceName": "The name of the workforce.", "WorkteamName": "The name of the work team." }, "AWS::SageMaker::Workteam CognitoMemberDefinition": { @@ -42216,7 +42396,7 @@ "DirectoryId": "The identifier of the AWS Directory Service directory for the WorkSpace.", "RootVolumeEncryptionEnabled": "Indicates whether the data stored on the root volume is encrypted.", "Tags": "The tags for the WorkSpace.", - "UserName": "The user name of the user for the WorkSpace. This user name must exist in the AWS Directory Service directory for the WorkSpace.", + "UserName": "The user name of the user for the WorkSpace. 
This user name must exist in the AWS Directory Service directory for the WorkSpace.\n\nThe reserved keyword, `[UNDEFINED]` , is used when creating user-decoupled WorkSpaces.", "UserVolumeEncryptionEnabled": "Indicates whether the data stored on the user volume is encrypted.", "VolumeEncryptionKey": "The ARN of the symmetric AWS KMS key used to encrypt data stored on your WorkSpace. Amazon WorkSpaces does not support asymmetric KMS keys.", "WorkspaceProperties": "The WorkSpace properties." @@ -42267,7 +42447,7 @@ "Value": "The value of the tag" }, "AWS::WorkSpacesWeb::IdentityProvider": { - "IdentityProviderDetails": "The identity provider details. The following list describes the provider detail keys for each identity provider type.\n\n- For Google and Login with Amazon:\n\n- `client_id`\n- `client_secret`\n- `authorize_scopes`\n- For Facebook:\n\n- `client_id`\n- `client_secret`\n- `authorize_scopes`\n- `api_version`\n- For Sign in with Apple:\n\n- `client_id`\n- `team_id`\n- `key_id`\n- `private_key`\n- `authorize_scopes`\n- For OIDC providers:\n\n- `client_id`\n- `client_secret`\n- `attributes_request_method`\n- `oidc_issuer`\n- `authorize_scopes`\n- `authorize_url` *if not available from discovery URL specified by oidc_issuer key*\n- `token_url` *if not available from discovery URL specified by oidc_issuer key*\n- `attributes_url` *if not available from discovery URL specified by oidc_issuer key*\n- `jwks_uri` *if not available from discovery URL specified by oidc_issuer key*\n- For SAML providers:\n\n- `MetadataFile` OR `MetadataURL`\n- `IDPSignout` *optional*", + "IdentityProviderDetails": "The identity provider details. The following list describes the provider detail keys for each identity provider type.\n\n- For Google and Login with Amazon:\n\n- `client_id`\n- `client_secret`\n- `authorize_scopes`\n- For Facebook:\n\n- `client_id`\n- `client_secret`\n- `authorize_scopes`\n- `api_version`\n- For Sign in with Apple:\n\n- `client_id`\n- `team_id`\n- `key_id`\n- `private_key`\n- `authorize_scopes`\n- For OIDC providers:\n\n- `client_id`\n- `client_secret`\n- `attributes_request_method`\n- `oidc_issuer`\n- `authorize_scopes`\n- `authorize_url` *if not available from discovery URL specified by oidc_issuer key*\n- `token_url` *if not available from discovery URL specified by oidc_issuer key*\n- `attributes_url` *if not available from discovery URL specified by oidc_issuer key*\n- `jwks_uri` *if not available from discovery URL specified by oidc_issuer key*\n- For SAML providers:\n\n- `MetadataFile` OR `MetadataURL`\n- `IDPSignout` (boolean) *optional*\n- `IDPInit` (boolean) *optional*\n- `RequestSigningAlgorithm` (string) *optional* - Only accepts `rsa-sha256`\n- `EncryptedResponses` (boolean) *optional*", "IdentityProviderName": "The identity provider name.", "IdentityProviderType": "The identity provider type.", "PortalArn": "The ARN of the identity provider." diff --git a/schema_source/cloudformation.schema.json b/schema_source/cloudformation.schema.json index 70c7e70e6..7f76210be 100644 --- a/schema_source/cloudformation.schema.json +++ b/schema_source/cloudformation.schema.json @@ -673,7 +673,7 @@ "properties": { "CrlDistributionPointExtensionConfiguration": { "$ref": "#/definitions/AWS::ACMPCA::CertificateAuthority.CrlDistributionPointExtensionConfiguration", - "markdownDescription": "", + "markdownDescription": "Configures the default behavior of the CRL Distribution Point extension for certificates issued by your CA. 
If this field is not provided, then the CRL Distribution Point extension will be present and contain the default CRL URL.", "title": "CrlDistributionPointExtensionConfiguration" }, "CustomCname": { @@ -708,7 +708,7 @@ "additionalProperties": false, "properties": { "OmitExtension": { - "markdownDescription": "", + "markdownDescription": "Configures whether the CRL Distribution Point extension should be populated with the default URL to the CRL. If set to `true` , then the CDP extension will not be present in any certificates issued by that CA unless otherwise specified through CSR or API passthrough.\n\n> Only set this if you have another way to distribute the CRL Distribution Points for certificates issued by your CA, such as the Matter Distributed Compliance Ledger.\n> \n> This configuration cannot be enabled with a custom CNAME set.", "title": "OmitExtension", "type": "boolean" } @@ -4930,7 +4930,7 @@ "title": "AccessLogSetting" }, "CacheClusterEnabled": { - "markdownDescription": "Specifies whether a cache cluster is enabled for the stage.", + "markdownDescription": "Specifies whether a cache cluster is enabled for the stage. To activate a method-level cache, set `CachingEnabled` to `true` for a method.", "title": "CacheClusterEnabled", "type": "boolean" }, @@ -6261,7 +6261,7 @@ "title": "AccessLogSetting" }, "CacheClusterEnabled": { - "markdownDescription": "Specifies whether a cache cluster is enabled for the stage.", + "markdownDescription": "Specifies whether a cache cluster is enabled for the stage. To activate a method-level cache, set `CachingEnabled` to `true` for a method.", "title": "CacheClusterEnabled", "type": "boolean" }, @@ -9161,13 +9161,9 @@ "additionalProperties": false, "properties": { "AlarmArn": { - "markdownDescription": "Amazon Resource Name (ARN) of the Amazon CloudWatch alarm.", - "title": "AlarmArn", "type": "string" }, "AlarmRoleArn": { - "markdownDescription": "ARN of an AWS Identity and Access Management (IAM) role for AWS AppConfig to monitor `AlarmArn` .", - "title": "AlarmRoleArn", "type": "string" } }, @@ -9177,13 +9173,9 @@ "additionalProperties": false, "properties": { "Key": { - "markdownDescription": "The key-value string map. The valid character set is `[a-zA-Z+-=._:/]` . The tag key can be up to 128 characters and must not start with `aws:` .", - "title": "Key", "type": "string" }, "Value": { - "markdownDescription": "The tag value can be up to 256 characters.", - "title": "Value", "type": "string" } }, @@ -22323,12 +22315,12 @@ "additionalProperties": false, "properties": { "MaxHealthyPercentage": { - "markdownDescription": "Specifies the upper threshold as a percentage of the desired capacity of the Auto Scaling group. It represents the maximum percentage of the group that can be in service and healthy, or pending, to support your workload when replacing instances. Value range is 100 to 200. After it's set, a value of `-1` will clear the previously set value.\n\nBoth `MinHealthyPercentage` and `MaxHealthyPercentage` must be specified, and the difference between them cannot be greater than 100. A large range increases the number of instances that can be replaced at the same time.", + "markdownDescription": "Specifies the upper threshold as a percentage of the desired capacity of the Auto Scaling group. It represents the maximum percentage of the group that can be in service and healthy, or pending, to support your workload when replacing instances. Value range is 100 to 200. 
To clear a previously set value, specify a value of `-1` .\n\nBoth `MinHealthyPercentage` and `MaxHealthyPercentage` must be specified, and the difference between them cannot be greater than 100. A large range increases the number of instances that can be replaced at the same time.", "title": "MaxHealthyPercentage", "type": "number" }, "MinHealthyPercentage": { - "markdownDescription": "Specifies the lower threshold as a percentage of the desired capacity of the Auto Scaling group. It represents the minimum percentage of the group to keep in service, healthy, and ready to use to support your workload when replacing instances. Value range is 0 to 100. After it's set, a value of `-1` will clear the previously set value.", + "markdownDescription": "Specifies the lower threshold as a percentage of the desired capacity of the Auto Scaling group. It represents the minimum percentage of the group to keep in service, healthy, and ready to use to support your workload when replacing instances. Value range is 0 to 100. To clear a previously set value, specify a value of `-1` .", "title": "MinHealthyPercentage", "type": "number" } @@ -22453,7 +22445,7 @@ "title": "NetworkInterfaceCount" }, "OnDemandMaxPricePercentageOverLowestPrice": { - "markdownDescription": "The price protection threshold for On-Demand Instances. This is the maximum you\u2019ll pay for an On-Demand Instance, expressed as a percentage higher than the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as `999999` .\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per vCPU or per memory price instead of the per instance price.\n\nDefault: `20`", + "markdownDescription": "[Price protection] The price protection threshold for On-Demand Instances, as a percentage higher than an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from either the lowest priced current generation instance types or, failing that, the lowest priced previous generation instance types that match your attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per instance price.\n\nDefault: `20`", "title": "OnDemandMaxPricePercentageOverLowestPrice", "type": "number" }, @@ -22463,7 +22455,7 @@ "type": "boolean" }, "SpotMaxPricePercentageOverLowestPrice": { - "markdownDescription": "The price protection threshold for Spot Instances. 
This is the maximum you\u2019ll pay for a Spot Instance, expressed as a percentage higher than the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as `999999` .\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per vCPU or per memory price instead of the per instance price.\n\nDefault: `100`", + "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from either the lowest priced current generation instance types or, failing that, the lowest priced previous generation instance types that match your attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per-vCPU or per-memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. 
\n\nDefault: `100`", "title": "SpotMaxPricePercentageOverLowestPrice", "type": "number" }, @@ -29517,7 +29509,7 @@ }, "EncryptionSpecification": { "$ref": "#/definitions/AWS::Cassandra::Table.EncryptionSpecification", - "markdownDescription": "The encryption at rest options for the table.\n\n- *AWS owned key* (default) - The key is owned by Amazon Keyspaces.\n- *Customer managed key* - The key is stored in your account and is created, owned, and managed by you.\n\n> If you choose encryption with a customer managed key, you must specify a valid customer managed KMS key with permissions granted to Amazon Keyspaces.\n\nFor more information, see [Encryption at rest in Amazon Keyspaces](https://docs.aws.amazon.com/keyspaces/latest/devguide/EncryptionAtRest.html) in the *Amazon Keyspaces Developer Guide* .", + "markdownDescription": "The encryption at rest options for the table.\n\n- *AWS owned key* (default) - The key is owned by Amazon Keyspaces .\n- *Customer managed key* - The key is stored in your account and is created, owned, and managed by you.\n\n> If you choose encryption with a customer managed key, you must specify a valid customer managed KMS key with permissions granted to Amazon Keyspaces.\n\nFor more information, see [Encryption at rest in Amazon Keyspaces](https://docs.aws.amazon.com/keyspaces/latest/devguide/EncryptionAtRest.html) in the *Amazon Keyspaces Developer Guide* .", "title": "EncryptionSpecification" }, "KeyspaceName": { @@ -33696,7 +33688,7 @@ "items": { "$ref": "#/definitions/AWS::CloudFront::Distribution.FunctionAssociation" }, - "markdownDescription": "A list of CloudFront functions that are associated with this cache behavior. CloudFront functions must be published to the `LIVE` stage to associate them with a cache behavior.", + "markdownDescription": "A list of CloudFront functions that are associated with this cache behavior. Your functions must be published to the `LIVE` stage to associate them with a cache behavior.", "title": "FunctionAssociations", "type": "array" }, @@ -33890,7 +33882,7 @@ "title": "ViewerCertificate" }, "WebACLId": { - "markdownDescription": "A unique identifier that specifies the AWS WAF web ACL, if any, to associate with this distribution. To specify a web ACL created using the latest version of AWS WAF , use the ACL ARN, for example `arn:aws:wafv2:us-east-1:123456789012:global/webacl/ExampleWebACL/473e64fd-f30b-4765-81a0-62ad96dd167a` . To specify a web ACL created using AWS WAF Classic, use the ACL ID, for example `473e64fd-f30b-4765-81a0-62ad96dd167a` .\n\nAWS WAF is a web application firewall that lets you monitor the HTTP and HTTPS requests that are forwarded to CloudFront, and lets you control access to your content. Based on conditions that you specify, such as the IP addresses that requests originate from or the values of query strings, CloudFront responds to requests either with the requested content or with an HTTP 403 status code (Forbidden). You can also configure CloudFront to return a custom error page when a request is blocked. For more information about AWS WAF , see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/what-is-aws-waf.html) .", + "markdownDescription": "A unique identifier that specifies the AWS WAF web ACL, if any, to associate with this distribution. To specify a web ACL created using the latest version of AWS WAF , use the ACL ARN, for example `arn:aws:wafv2:us-east-1:123456789012:global/webacl/ExampleWebACL/a1b2c3d4-5678-90ab-cdef-EXAMPLE11111` . 
To specify a web ACL created using AWS WAF Classic, use the ACL ID, for example `a1b2c3d4-5678-90ab-cdef-EXAMPLE11111` .\n\nAWS WAF is a web application firewall that lets you monitor the HTTP and HTTPS requests that are forwarded to CloudFront, and lets you control access to your content. Based on conditions that you specify, such as the IP addresses that requests originate from or the values of query strings, CloudFront responds to requests either with the requested content or with an HTTP 403 status code (Forbidden). You can also configure CloudFront to return a custom error page when a request is blocked. For more information about AWS WAF , see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/what-is-aws-waf.html) .", "title": "WebACLId", "type": "string" } @@ -34455,7 +34447,7 @@ "items": { "$ref": "#/definitions/AWS::CloudFront::Function.KeyValueStoreAssociation" }, - "markdownDescription": "The configuration for the Key Value Store associations.", + "markdownDescription": "The configuration for the key value store associations.", "title": "KeyValueStoreAssociations", "type": "array" }, @@ -34486,7 +34478,7 @@ "additionalProperties": false, "properties": { "KeyValueStoreARN": { - "markdownDescription": "The Amazon Resource Name (ARN) of the Key Value Store association.", + "markdownDescription": "The Amazon Resource Name (ARN) of the key value store association.", "title": "KeyValueStoreARN", "type": "string" } @@ -34627,17 +34619,17 @@ "additionalProperties": false, "properties": { "Comment": { - "markdownDescription": "A comment for the Key Value Store.", + "markdownDescription": "A comment for the key value store.", "title": "Comment", "type": "string" }, "ImportSource": { "$ref": "#/definitions/AWS::CloudFront::KeyValueStore.ImportSource", - "markdownDescription": "The import source for the Key Value Store.", + "markdownDescription": "The import source for the key value store.", "title": "ImportSource" }, "Name": { - "markdownDescription": "The name of the Key Value Store.", + "markdownDescription": "The name of the key value store.", "title": "Name", "type": "string" } @@ -34672,12 +34664,12 @@ "additionalProperties": false, "properties": { "SourceArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the import source for the Key Value Store.", + "markdownDescription": "The Amazon Resource Name (ARN) of the import source for the key value store.", "title": "SourceArn", "type": "string" }, "SourceType": { - "markdownDescription": "The source type of the import source for the Key Value Store.", + "markdownDescription": "The source type of the import source for the key value store.", "title": "SourceType", "type": "string" } @@ -34862,7 +34854,7 @@ "type": "string" }, "Name": { - "markdownDescription": "A name to identify the origin access control.", + "markdownDescription": "A name to identify the origin access control. 
You can specify up to 64 characters.", "title": "Name", "type": "string" }, @@ -35689,7 +35681,7 @@ }, "StrictTransportSecurity": { "$ref": "#/definitions/AWS::CloudFront::ResponseHeadersPolicy.StrictTransportSecurity", - "markdownDescription": "Determines whether CloudFront includes the `Strict-Transport-Security` HTTP response header and the header's value.\n\nFor more information about the `Strict-Transport-Security` HTTP response header, see [Strict-Transport-Security](https://docs.aws.amazon.com/https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security) in the MDN Web Docs.", + "markdownDescription": "Determines whether CloudFront includes the `Strict-Transport-Security` HTTP response header and the header's value.\n\nFor more information about the `Strict-Transport-Security` HTTP response header, see [Security headers](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/understanding-response-headers-policies.html#understanding-response-headers-policies-security) in the *Amazon CloudFront Developer Guide* and [Strict-Transport-Security](https://docs.aws.amazon.com/https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security) in the MDN Web Docs.", "title": "StrictTransportSecurity" }, "XSSProtection": { @@ -38482,6 +38474,8 @@ "additionalProperties": false, "properties": { "FleetArn": { + "markdownDescription": "Specifies the compute fleet ARN for the build project.", + "title": "FleetArn", "type": "string" } }, @@ -40544,7 +40538,7 @@ "type": "string" }, "PipelineType": { - "markdownDescription": "CodePipeline provides the following pipeline types, which differ in characteristics and price, so that you can tailor your pipeline features and cost to the needs of your applications.\n\n- V1 type pipelines have a JSON structure that contains standard pipeline, stage, and action-level parameters.\n- V2 type pipelines have the same structure as a V1 type, along with additional parameters for release safety and trigger configuration.\n\n> Including V2 parameters, such as triggers on Git tags, in the pipeline JSON when creating or updating a pipeline will result in the pipeline having the V2 type of pipeline and the associated costs. \n\nFor information about pricing for CodePipeline, see [Pricing](https://docs.aws.amazon.com/https://aws.amazon.com/codepipeline/pricing/) .\n\nFor information about which type of pipeline to choose, see [What type of pipeline is right for me?](https://docs.aws.amazon.com/codepipeline/latest/userguide/pipeline-types-planning.html) .", + "markdownDescription": "CodePipeline provides the following pipeline types, which differ in characteristics and price, so that you can tailor your pipeline features and cost to the needs of your applications.\n\n- V1 type pipelines have a JSON structure that contains standard pipeline, stage, and action-level parameters.\n- V2 type pipelines have the same structure as a V1 type, along with additional parameters for release safety and trigger configuration.\n\n> Including V2 parameters, such as triggers on Git tags, in the pipeline JSON when creating or updating a pipeline will result in the pipeline having the V2 type of pipeline and the associated costs. 
\n\nFor information about pricing for CodePipeline, see [Pricing](https://docs.aws.amazon.com/codepipeline/pricing/) .\n\nFor information about which type of pipeline to choose, see [What type of pipeline is right for me?](https://docs.aws.amazon.com/codepipeline/latest/userguide/pipeline-types-planning.html) .", "title": "PipelineType", "type": "string" }, @@ -40803,7 +40797,7 @@ "items": { "$ref": "#/definitions/AWS::CodePipeline::Pipeline.GitPushFilter" }, - "markdownDescription": "The field where the repository event that will start the pipeline, such as pushing Git tags, is specified with details.\n\n> Git tags is the only supported event type.", + "markdownDescription": "The field where the repository event that will start the pipeline, such as pushing Git tags, is specified with details.", "title": "Push", "type": "array" }, @@ -41570,7 +41564,7 @@ "additionalProperties": false, "properties": { "CreatedBy": { - "markdownDescription": "", + "markdownDescription": "The name or email alias of the person who created the notification rule.", "title": "CreatedBy", "type": "string" }, @@ -41580,7 +41574,7 @@ "type": "string" }, "EventTypeId": { - "markdownDescription": "", + "markdownDescription": "The event type associated with this notification rule. For a complete list of event types and IDs, see [Notification concepts](https://docs.aws.amazon.com/dtconsole/latest/userguide/concepts.html#concepts-api) in the *Developer Tools Console User Guide* .", "title": "EventTypeId", "type": "string" }, @@ -41619,7 +41613,7 @@ "type": "object" }, "TargetAddress": { - "markdownDescription": "", + "markdownDescription": "The Amazon Resource Name (ARN) of the Amazon SNS topic or AWS Chatbot client.", "title": "TargetAddress", "type": "string" }, @@ -41627,7 +41621,7 @@ "items": { "$ref": "#/definitions/AWS::CodeStarNotifications::NotificationRule.Target" }, - "markdownDescription": "A list of Amazon Resource Names (ARNs) of Amazon Simple Notification Service topics and AWS Chatbot clients to associate with the notification rule.", + "markdownDescription": "A list of Amazon Resource Names (ARNs) of Amazon SNS topics and AWS Chatbot clients to associate with the notification rule.", "title": "Targets", "type": "array" } @@ -41671,7 +41665,7 @@ "type": "string" }, "TargetType": { - "markdownDescription": "The target type. Can be an Amazon Simple Notification Service topic or AWS Chatbot client.\n\n- Amazon Simple Notification Service topics are specified as `SNS` .\n- AWS Chatbot clients are specified as `AWSChatbotSlack` .", + "markdownDescription": "The target type. Can be an Amazon Simple Notification Service topic or AWS Chatbot client.\n\n- Amazon Simple Notification Service topics are specified as `SNS` .\n- AWS Chatbot clients are specified as `AWSChatbotSlack` .\n- AWS Chatbot clients for Microsoft Teams are specified as `AWSChatbotMicrosoftTeams` .", "title": "TargetType", "type": "string" } @@ -42065,7 +42059,7 @@ "additionalProperties": false, "properties": { "AmbiguousRoleResolution": { - "markdownDescription": "Specifies the action to be taken if either no rules match the claim value for the Rules type, or there is no `cognito:preferred_role` claim and there are multiple `cognito:roles` matches for the Token type. 
If you specify Token or Rules as the Type, AmbiguousRoleResolution is required.\n\nValid values are `AuthenticatedRole` or `Deny` .", + "markdownDescription": "If you specify Token or Rules as the `Type` , `AmbiguousRoleResolution` is required.\n\nSpecifies the action to be taken if either no rules match the claim value for the `Rules` type, or there is no `cognito:preferred_role` claim and there are multiple `cognito:roles` matches for the `Token` type.", "title": "AmbiguousRoleResolution", "type": "string" }, @@ -42080,7 +42074,7 @@ "title": "RulesConfiguration" }, "Type": { - "markdownDescription": "The role-mapping type. `Token` uses `cognito:roles` and `cognito:preferred_role` claims from the Amazon Cognito identity provider token to map groups to roles. `Rules` attempts to match claims from the token to map to a role.\n\nValid values are `Token` or `Rules` .", + "markdownDescription": "The role mapping type. Token will use `cognito:roles` and `cognito:preferred_role` claims from the Cognito identity provider token to map groups to roles. Rules will attempt to match claims from the token to map to a role.", "title": "Type", "type": "string" } @@ -42276,7 +42270,7 @@ "type": "array" }, "DeletionProtection": { - "markdownDescription": "When active, `DeletionProtection` prevents accidental deletion of your user pool. Before you can delete a user pool that you have protected against deletion, you must deactivate this feature.\n\nWhen you try to delete a protected user pool in a `DeleteUserPool` API request, Amazon Cognito returns an `InvalidParameterException` error. To delete a protected user pool, send a new `DeleteUserPool` request after you deactivate deletion protection in an `UpdateUserPool` API request.", + "markdownDescription": "When active, `DeletionProtection` prevents accidental deletion of your user\npool. Before you can delete a user pool that you have protected against deletion, you\nmust deactivate this feature.\n\nWhen you try to delete a protected user pool in a `DeleteUserPool` API request, Amazon Cognito returns an `InvalidParameterException` error. To delete a protected user pool, send a new `DeleteUserPool` request after you deactivate deletion protection in an `UpdateUserPool` API request.", "title": "DeletionProtection", "type": "string" }, @@ -42608,7 +42602,7 @@ }, "PreTokenGenerationConfig": { "$ref": "#/definitions/AWS::Cognito::UserPool.PreTokenGenerationConfig", - "markdownDescription": "", + "markdownDescription": "The detailed configuration of a pre token generation trigger. If you also set an ARN in `PreTokenGeneration` , its value must be identical to `PreTokenGenerationConfig` .", "title": "PreTokenGenerationConfig" }, "UserMigration": { @@ -42691,12 +42685,12 @@ "additionalProperties": false, "properties": { "LambdaArn": { - "markdownDescription": "", + "markdownDescription": "The Amazon Resource Name (ARN) of the function that you want to assign to your Lambda trigger.\n\nThis parameter and the `PreTokenGeneration` property of `LambdaConfig` have the same value. For new instances of pre token generation triggers, set `LambdaArn` .", "title": "LambdaArn", "type": "string" }, "LambdaVersion": { - "markdownDescription": "", + "markdownDescription": "The user pool trigger version of the request that Amazon Cognito sends to your Lambda function. 
Higher-numbered versions add fields that support new features.", "title": "LambdaVersion", "type": "string" } @@ -43345,7 +43339,7 @@ "type": "array" }, "ProviderDetails": { - "markdownDescription": "The IdP details. The following list describes the provider detail keys for each IdP type.\n\n- For Google and Login with Amazon:\n\n- client_id\n- client_secret\n- authorize_scopes\n- For Facebook:\n\n- client_id\n- client_secret\n- authorize_scopes\n- api_version\n- For Sign in with Apple:\n\n- client_id\n- team_id\n- key_id\n- private_key\n- authorize_scopes\n- For OpenID Connect (OIDC) providers:\n\n- client_id\n- client_secret\n- attributes_request_method\n- oidc_issuer\n- authorize_scopes\n- The following keys are only present if Amazon Cognito didn't discover them at the `oidc_issuer` URL.\n\n- authorize_url\n- token_url\n- attributes_url\n- jwks_uri\n- Amazon Cognito sets the value of the following keys automatically. They are read-only.\n\n- attributes_url_add_attributes\n- For SAML providers:\n\n- MetadataFile or MetadataURL\n- IDPSignout *optional*", + "markdownDescription": "The scopes, URLs, and identifiers for your external identity provider. The following\nexamples describe the provider detail keys for each IdP type. These values and their\nschema are subject to change. Social IdP `authorize_scopes` values must match\nthe values listed here.\n\n- **OpenID Connect (OIDC)** - Amazon Cognito accepts the following elements when it can't discover endpoint URLs from `oidc_issuer` : `attributes_url` , `authorize_url` , `jwks_uri` , `token_url` .\n\nCreate or update request: `\"ProviderDetails\": { \"attributes_request_method\": \"GET\", \"attributes_url\": \"https://auth.example.com/userInfo\", \"authorize_scopes\": \"openid profile email\", \"authorize_url\": \"https://auth.example.com/authorize\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"jwks_uri\": \"https://auth.example.com/.well-known/jwks.json\", \"oidc_issuer\": \"https://auth.example.com\", \"token_url\": \"https://example.com/token\" }`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_request_method\": \"GET\", \"attributes_url\": \"https://auth.example.com/userInfo\", \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"openid profile email\", \"authorize_url\": \"https://auth.example.com/authorize\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"jwks_uri\": \"https://auth.example.com/.well-known/jwks.json\", \"oidc_issuer\": \"https://auth.example.com\", \"token_url\": \"https://example.com/token\" }`\n- **SAML** - Create or update request with Metadata URL: `\"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"MetadataURL\": \"https://auth.example.com/sso/saml/metadata\", \"RequestSigningAlgorithm\": \"rsa-sha256\" }`\n\nCreate or update request with Metadata file: `\"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"MetadataFile\": \"[metadata XML]\", \"RequestSigningAlgorithm\": \"rsa-sha256\" }`\n\nThe value of `MetadataFile` must be the plaintext metadata document with all quote (\") characters escaped by backslashes.\n\nDescribe response: `\"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"ActiveEncryptionCertificate\": \"[certificate]\", \"MetadataURL\": \"https://auth.example.com/sso/saml/metadata\", \"RequestSigningAlgorithm\": 
\"rsa-sha256\", \"SLORedirectBindingURI\": \"https://auth.example.com/slo/saml\", \"SSORedirectBindingURI\": \"https://auth.example.com/sso/saml\" }`\n- **LoginWithAmazon** - Create or update request: `\"ProviderDetails\": { \"authorize_scopes\": \"profile postal_code\", \"client_id\": \"amzn1.application-oa2-client.1example23456789\", \"client_secret\": \"provider-app-client-secret\"`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_url\": \"https://api.amazon.com/user/profile\", \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"profile postal_code\", \"authorize_url\": \"https://www.amazon.com/ap/oa\", \"client_id\": \"amzn1.application-oa2-client.1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"token_request_method\": \"POST\", \"token_url\": \"https://api.amazon.com/auth/o2/token\" }`\n- **Google** - Create or update request: `\"ProviderDetails\": { \"authorize_scopes\": \"email profile openid\", \"client_id\": \"1example23456789.apps.googleusercontent.com\", \"client_secret\": \"provider-app-client-secret\" }`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_url\": \"https://people.googleapis.com/v1/people/me?personFields=\", \"attributes_url_add_attributes\": \"true\", \"authorize_scopes\": \"email profile openid\", \"authorize_url\": \"https://accounts.google.com/o/oauth2/v2/auth\", \"client_id\": \"1example23456789.apps.googleusercontent.com\", \"client_secret\": \"provider-app-client-secret\", \"oidc_issuer\": \"https://accounts.google.com\", \"token_request_method\": \"POST\", \"token_url\": \"https://www.googleapis.com/oauth2/v4/token\" }`\n- **SignInWithApple** - Create or update request: `\"ProviderDetails\": { \"authorize_scopes\": \"email name\", \"client_id\": \"com.example.cognito\", \"private_key\": \"1EXAMPLE\", \"key_id\": \"2EXAMPLE\", \"team_id\": \"3EXAMPLE\" }`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"email name\", \"authorize_url\": \"https://appleid.apple.com/auth/authorize\", \"client_id\": \"com.example.cognito\", \"key_id\": \"1EXAMPLE\", \"oidc_issuer\": \"https://appleid.apple.com\", \"team_id\": \"2EXAMPLE\", \"token_request_method\": \"POST\", \"token_url\": \"https://appleid.apple.com/auth/token\" }`\n- **Facebook** - Create or update request: `\"ProviderDetails\": { \"api_version\": \"v17.0\", \"authorize_scopes\": \"public_profile, email\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\" }`\n\nDescribe response: `\"ProviderDetails\": { \"api_version\": \"v17.0\", \"attributes_url\": \"https://graph.facebook.com/v17.0/me?fields=\", \"attributes_url_add_attributes\": \"true\", \"authorize_scopes\": \"public_profile, email\", \"authorize_url\": \"https://www.facebook.com/v17.0/dialog/oauth\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"token_request_method\": \"GET\", \"token_url\": \"https://graph.facebook.com/v17.0/oauth/access_token\" }`", "title": "ProviderDetails", "type": "object" }, @@ -43884,7 +43878,7 @@ "properties": { "ClientMetadata": { "additionalProperties": true, - "markdownDescription": "A map of custom key-value pairs that you can provide as input for the custom workflow that is invoked by the *pre sign-up* trigger.\n\nYou create custom workflows by assigning AWS Lambda functions to user pool triggers. 
When you create a `UserPoolUser` resource and include the `ClientMetadata` property, Amazon Cognito invokes the function that is assigned to the *pre sign-up* trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a `clientMetadata` attribute, which provides the data that you assigned to the ClientMetadata property. In your function code in AWS Lambda , you can process the `clientMetadata` value to enhance your workflow for your specific needs.\n\nFor more information, see [Customizing User Pool Workflows with Lambda Triggers](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) in the *Amazon Cognito Developer Guide* .\n\n> Take the following limitations into consideration when you use the ClientMetadata parameter:\n> \n> - Amazon Cognito does not store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.\n> - Amazon Cognito does not validate the ClientMetadata value.\n> - Amazon Cognito does not encrypt the the ClientMetadata value, so don't use it to provide sensitive information.", + "markdownDescription": "A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.\n\nYou create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the AdminCreateUser API action, Amazon Cognito invokes the function that is assigned to the *pre sign-up* trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a `clientMetadata` attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminCreateUser request. In your function code in AWS Lambda , you can process the `clientMetadata` value to enhance your workflow for your specific needs.\n\nFor more information, see [Customizing user pool Workflows with Lambda Triggers](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) in the *Amazon Cognito Developer Guide* .\n\n> When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following:\n> \n> - Store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose.\n> - Validate the ClientMetadata value.\n> - Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive information.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -59334,7 +59328,7 @@ }, "TaskReportConfig": { "$ref": "#/definitions/AWS::DataSync::Task.TaskReportConfig", - "markdownDescription": "Specifies how you want to configure a task report, which provides detailed information about for your DataSync transfer.", + "markdownDescription": "Specifies how you want to configure a task report, which provides detailed information about your DataSync transfer. 
For more information, see [Monitoring your DataSync transfers with task reports](https://docs.aws.amazon.com/datasync/latest/userguide/task-reports.html) .\n\nWhen using this parameter, your caller identity (the role that you're using DataSync with) must have the `iam:PassRole` permission. The [AWSDataSyncFullAccess](https://docs.aws.amazon.com/datasync/latest/userguide/security-iam-awsmanpol.html#security-iam-awsmanpol-awsdatasyncfullaccess) policy includes this permission.", "title": "TaskReportConfig" } }, @@ -63684,7 +63678,7 @@ "title": "NetworkInterfaceCount" }, "OnDemandMaxPricePercentageOverLowestPrice": { - "markdownDescription": "The price protection threshold for On-Demand Instances. This is the maximum you\u2019ll pay for an On-Demand Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> If you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. \n\nDefault: `20`", + "markdownDescription": "[Price protection] The price protection threshold for On-Demand Instances, as a percentage higher than an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo indicate no price protection threshold, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> If you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. \n\nDefault: `20`", "title": "OnDemandMaxPricePercentageOverLowestPrice", "type": "number" }, @@ -63694,7 +63688,7 @@ "type": "boolean" }, "SpotMaxPricePercentageOverLowestPrice": { - "markdownDescription": "The price protection threshold for Spot Instance. This is the maximum you\u2019ll pay for an Spot Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. 
When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> If you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. \n\nDefault: `100`", + "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo indicate no price protection threshold, specify a high value, such as `999999` .\n\nIf you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, then `SpotMaxPricePercentageOverLowestPrice` is used and the value for that parameter defaults to `100` . \n\nDefault: `100`", "title": "SpotMaxPricePercentageOverLowestPrice", "type": "number" }, @@ -65874,7 +65868,7 @@ "type": "boolean" }, "AssociatePublicIpAddress": { - "markdownDescription": "Indicates whether to assign a public IPv4 address to an instance. Applies only if creating a network interface when launching an instance. The network interface must be the primary network interface. If launching into a default subnet, the default value is `true` .\n\nStarting on February 1, 2024, AWS will charge for all public IPv4 addresses, including public IPv4 addresses associated with running instances and Elastic IP addresses. For more information, see the *Public IPv4 Address* tab on the [VPC pricing page](https://docs.aws.amazon.com/vpc/pricing/) .", + "markdownDescription": "Indicates whether to assign a public IPv4 address to an instance. Applies only if creating a network interface when launching an instance. The network interface must be the primary network interface. 
If launching into a default subnet, the default value is `true` .\n\nAWS charges for all public IPv4 addresses, including public IPv4 addresses associated with running instances and Elastic IP addresses. For more information, see the *Public IPv4 Address* tab on the [VPC pricing page](https://docs.aws.amazon.com/vpc/pricing/) .", "title": "AssociatePublicIpAddress", "type": "boolean" }, @@ -66785,7 +66779,7 @@ "title": "NetworkInterfaceCount" }, "OnDemandMaxPricePercentageOverLowestPrice": { - "markdownDescription": "The price protection threshold for On-Demand Instances. This is the maximum you\u2019ll pay for an On-Demand Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> If you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. \n\nDefault: `20`", + "markdownDescription": "[Price protection] The price protection threshold for On-Demand Instances, as a percentage higher than an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> If you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. \n\nDefault: `20`", "title": "OnDemandMaxPricePercentageOverLowestPrice", "type": "number" }, @@ -66795,7 +66789,7 @@ "type": "boolean" }, "SpotMaxPricePercentageOverLowestPrice": { - "markdownDescription": "The price protection threshold for Spot Instances. This is the maximum you\u2019ll pay for a Spot Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. 
When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> If you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. \n\nDefault: `100`", + "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo indicate no price protection threshold, specify a high value, such as `999999` .\n\nIf you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, then `SpotMaxPricePercentageOverLowestPrice` is used and the value for that parameter defaults to `100` . \n\nDefault: `100`", "title": "SpotMaxPricePercentageOverLowestPrice", "type": "number" }, @@ -67191,7 +67185,7 @@ "type": "boolean" }, "AssociatePublicIpAddress": { - "markdownDescription": "Associates a public IPv4 address with eth0 for a new network interface.\n\nStarting on February 1, 2024, AWS will charge for all public IPv4 addresses, including public IPv4 addresses associated with running instances and Elastic IP addresses. For more information, see the *Public IPv4 Address* tab on the [Amazon VPC pricing page](https://docs.aws.amazon.com/vpc/pricing/) .", + "markdownDescription": "Associates a public IPv4 address with eth0 for a new network interface.\n\nAWS charges for all public IPv4 addresses, including public IPv4 addresses associated with running instances and Elastic IP addresses. 
For more information, see the *Public IPv4 Address* tab on the [Amazon VPC pricing page](https://docs.aws.amazon.com/vpc/pricing/) .", "title": "AssociatePublicIpAddress", "type": "boolean" }, @@ -71090,7 +71084,7 @@ "additionalProperties": false, "properties": { "AssociatePublicIpAddress": { - "markdownDescription": "Indicates whether to assign a public IPv4 address to an instance you launch in a VPC. The public IP address can only be assigned to a network interface for eth0, and can only be assigned to a new network interface, not an existing one. You cannot specify more than one network interface in the request. If launching into a default subnet, the default value is `true` .\n\nStarting on February 1, 2024, AWS will charge for all public IPv4 addresses, including public IPv4 addresses associated with running instances and Elastic IP addresses. For more information, see the *Public IPv4 Address* tab on the [Amazon VPC pricing page](https://docs.aws.amazon.com/vpc/pricing/) .", + "markdownDescription": "Indicates whether to assign a public IPv4 address to an instance you launch in a VPC. The public IP address can only be assigned to a network interface for eth0, and can only be assigned to a new network interface, not an existing one. You cannot specify more than one network interface in the request. If launching into a default subnet, the default value is `true` .\n\nAWS charges for all public IPv4 addresses, including public IPv4 addresses associated with running instances and Elastic IP addresses. For more information, see the *Public IPv4 Address* tab on the [Amazon VPC pricing page](https://docs.aws.amazon.com/vpc/pricing/) .", "title": "AssociatePublicIpAddress", "type": "boolean" }, @@ -71274,7 +71268,7 @@ "title": "NetworkInterfaceCount" }, "OnDemandMaxPricePercentageOverLowestPrice": { - "markdownDescription": "The price protection threshold for On-Demand Instances. This is the maximum you\u2019ll pay for an On-Demand Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> If you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. \n\nDefault: `20`", + "markdownDescription": "[Price protection] The price protection threshold for On-Demand Instances, as a percentage higher than an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. 
When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo indicate no price protection threshold, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> If you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. \n\nDefault: `20`", "title": "OnDemandMaxPricePercentageOverLowestPrice", "type": "number" }, @@ -71284,7 +71278,7 @@ "type": "boolean" }, "SpotMaxPricePercentageOverLowestPrice": { - "markdownDescription": "The price protection threshold for Spot Instance. This is the maximum you\u2019ll pay for an Spot Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> If you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. \n\nDefault: `100`", + "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo indicate no price protection threshold, specify a high value, such as `999999` .\n\nIf you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. 
If you don't specify either, then `SpotMaxPricePercentageOverLowestPrice` is used and the value for that parameter defaults to `100` . \n\nDefault: `100`", "title": "SpotMaxPricePercentageOverLowestPrice", "type": "number" }, @@ -71945,7 +71939,7 @@ "type": "number" }, "MapPublicIpOnLaunch": { - "markdownDescription": "Indicates whether instances launched in this subnet receive a public IPv4 address. The default value is `false` .\n\nStarting on February 1, 2024, AWS will charge for all public IPv4 addresses, including public IPv4 addresses associated with running instances and Elastic IP addresses. For more information, see the *Public IPv4 Address* tab on the [VPC pricing page](https://docs.aws.amazon.com/vpc/pricing/) .", + "markdownDescription": "Indicates whether instances launched in this subnet receive a public IPv4 address. The default value is `false` .\n\nAWS charges for all public IPv4 addresses, including public IPv4 addresses associated with running instances and Elastic IP addresses. For more information, see the *Public IPv4 Address* tab on the [VPC pricing page](https://docs.aws.amazon.com/vpc/pricing/) .", "title": "MapPublicIpOnLaunch", "type": "boolean" }, @@ -77423,7 +77417,7 @@ "additionalProperties": false, "properties": { "ContainerName": { - "markdownDescription": "The name of the container (as it appears in a container definition) to associate with the load balancer.", + "markdownDescription": "The name of the container (as it appears in a container definition) to associate with the load balancer.\n\nYou need to specify the container name when configuring the target group for an Amazon ECS load balancer.", "title": "ContainerName", "type": "string" }, @@ -78136,7 +78130,7 @@ "items": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.SystemControl" }, - "markdownDescription": "A list of namespaced kernel parameters to set in the container. This parameter maps to `Sysctls` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--sysctl` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . For example, you can configure `net.ipv4.tcp_keepalive_time` setting to maintain longer lived connections.\n\nWe don't recommend that you specify network-related `systemControls` parameters for multiple containers in a single task that also uses either the `awsvpc` or `host` network mode. Doing this has the following disadvantages:\n\n- For tasks that use the `awsvpc` network mode including Fargate, if you set `systemControls` for any container, it applies to all containers in the task. If you set different `systemControls` for multiple containers in a single task, the container that's started last determines which `systemControls` take effect.\n- For tasks that use the `host` network mode, the network namespace `systemControls` aren't supported.\n\nIf you're setting an IPC resource namespace to use for the containers in the task, the following conditions apply to your system controls. 
For more information, see [IPC mode](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#task_definition_ipcmode) .\n\n- For tasks that use the `host` IPC mode, IPC namespace `systemControls` aren't supported.\n- For tasks that use the `task` IPC mode, IPC namespace `systemControls` values apply to all containers within a task.\n\n> This parameter is not supported for Windows containers. > This parameter is only supported for tasks that are hosted on AWS Fargate if the tasks are using platform version `1.4.0` or later (Linux). This isn't supported for Windows containers on Fargate.", + "markdownDescription": "A list of namespaced kernel parameters to set in the container. This parameter maps to `Sysctls` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--sysctl` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . For example, you can configure the `net.ipv4.tcp_keepalive_time` setting to maintain longer lived connections.", "title": "SystemControls", "type": "array" }, @@ -78974,7 +78968,7 @@ "additionalProperties": false, "properties": { "ContainerName": { - "markdownDescription": "The name of the container (as it appears in a container definition) to associate with the load balancer.", + "markdownDescription": "The name of the container (as it appears in a container definition) to associate with the load balancer.\n\nYou need to specify the container name when configuring the target group for an Amazon ECS load balancer.", "title": "ContainerName", "type": "string" }, @@ -79307,7 +79301,7 @@ "type": "array" }, "PerformanceMode": { - "markdownDescription": "The Performance mode of the file system. We recommend `generalPurpose` performance mode for all file systems. File systems using the `maxIO` performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. The performance mode can't be changed after the file system has been created. The `maxIO` mode is not supported on One Zone file systems.\n\n> Due to the higher per-operation latencies with Max I/O, we recommend using General Purpose performance mode for all file systems. \n\nDefault is `generalPurpose` .", + "markdownDescription": "The performance mode of the file system. We recommend `generalPurpose` performance mode for all file systems. File systems using the `maxIO` performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. The performance mode can't be changed after the file system has been created. The `maxIO` mode is not supported on One Zone file systems.\n\n> Due to the higher per-operation latencies with Max I/O, we recommend using General Purpose performance mode for all file systems. \n\nDefault is `generalPurpose` .", "title": "PerformanceMode", "type": "string" }, @@ -87475,7 +87469,7 @@ "items": { "$ref": "#/definitions/AWS::ElasticLoadBalancingV2::LoadBalancer.SubnetMapping" }, - "markdownDescription": "The IDs of the public subnets. You can specify only one subnet per Availability Zone. 
You must specify either subnets or subnet mappings, but not both.\n\n[Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets.\n\n[Application Load Balancers on Outposts] You must specify one Outpost subnet.\n\n[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.\n\n[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancer, you can specify one IPv6 address per subnet.\n\n[Gateway Load Balancers] You can specify subnets from one or more Availability Zones. You cannot specify Elastic IP addresses for your subnets.", + "markdownDescription": "The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both.\n\n[Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets.\n\n[Application Load Balancers on Outposts] You must specify one Outpost subnet.\n\n[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.\n\n[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancers, you can specify one IPv6 address per subnet.\n\n[Gateway Load Balancers] You can specify subnets from one or more Availability Zones. You cannot specify Elastic IP addresses for your subnets.", "title": "SubnetMappings", "type": "array" }, @@ -87483,7 +87477,7 @@ "items": { "type": "string" }, - "markdownDescription": "The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both. 
To specify an Elastic IP address, specify subnet mappings instead of subnets.\n\n[Application Load Balancers] You must specify subnets from at least two Availability Zones.\n\n[Application Load Balancers on Outposts] You must specify one Outpost subnet.\n\n[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.\n\n[Network Load Balancers] You can specify subnets from one or more Availability Zones.\n\n[Gateway Load Balancers] You can specify subnets from one or more Availability Zones.", "title": "Subnets", "type": "array" }, @@ -87774,7 +87768,7 @@ "additionalProperties": false, "properties": { "Key": { - "markdownDescription": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deregistration_delay.timeout_seconds` - The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from `draining` to `unused` . The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.\n- `stickiness.enabled` - Indicates whether target stickiness is enabled. The value is `true` or `false` . The default is `false` .\n- `stickiness.type` - Indicates the type of stickiness. The possible values are:\n\n- `lb_cookie` and `app_cookie` for Application Load Balancers.\n- `source_ip` for Network Load Balancers.\n- `source_ip_dest_ip` and `source_ip_dest_ip_proto` for Gateway Load Balancers.\n\nThe following attributes are supported by Application Load Balancers and Network Load Balancers:\n\n- `load_balancing.cross_zone.enabled` - Indicates whether cross zone load balancing is enabled. The value is `true` , `false` or `use_load_balancer_configuration` . The default is `use_load_balancer_configuration` .\n- `target_group_health.dns_failover.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to the maximum number of targets. The default is `off` .\n- `target_group_health.dns_failover.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are 1 to the maximum number of targets. The default is 1.\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n\nThe following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:\n\n- `load_balancing.algorithm.type` - The load balancing algorithm determines how the load balancer selects targets when routing requests. 
The value is `round_robin` , `least_outstanding_requests` , or `weighted_random` . The default is `round_robin` .\n- `load_balancing.algorithm.anomaly_mitigation` - Only available when `load_balancing.algorithm.type` is `weighted_random` . Indicates whether anomaly mitigation is enabled. The value is `on` or `off` . The default is `off` .\n- `slow_start.duration_seconds` - The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). The default is 0 seconds (disabled).\n- `stickiness.app_cookie.cookie_name` - Indicates the name of the application-based cookie. Names that start with the following prefixes are not allowed: `AWSALB` , `AWSALBAPP` , and `AWSALBTG` ; they're reserved for use by the load balancer.\n- `stickiness.app_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the application-based cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n- `stickiness.lb_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n\nThe following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:\n\n- `lambda.multi_value_headers.enabled` - Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is `true` or `false` . The default is `false` . If the value is `false` and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.\n\nThe following attributes are supported only by Network Load Balancers:\n\n- `deregistration_delay.connection_termination.enabled` - Indicates whether the load balancer terminates connections at the end of the deregistration timeout. The value is `true` or `false` . For new UDP/TCP_UDP target groups the default is `true` . Otherwise, the default is `false` .\n- `preserve_client_ip.enabled` - Indicates whether client IP preservation is enabled. The value is `true` or `false` . The default is disabled if the target group type is IP address and the target group protocol is TCP or TLS. Otherwise, the default is enabled. Client IP preservation cannot be disabled for UDP and TCP_UDP target groups.\n- `proxy_protocol_v2.enabled` - Indicates whether Proxy Protocol version 2 is enabled. The value is `true` or `false` . The default is `false` .\n- `target_health_state.unhealthy.connection_termination.enabled` - Indicates whether the load balancer terminates connections to unhealthy targets. The value is `true` or `false` . The default is `true` .\n\nThe following attributes are supported only by Gateway Load Balancers:\n\n- `target_failover.on_deregistration` - Indicates how the Gateway Load Balancer handles existing flows when a target is deregistered. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . 
The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) can't be set independently. The value you set for both attributes must be the same.\n- `target_failover.on_unhealthy` - Indicates how the Gateway Load Balancer handles existing flows when a target is unhealthy. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) cannot be set independently. The value you set for both attributes must be the same.", + "markdownDescription": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deregistration_delay.timeout_seconds` - The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from `draining` to `unused` . The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.\n- `stickiness.enabled` - Indicates whether target stickiness is enabled. The value is `true` or `false` . The default is `false` .\n- `stickiness.type` - Indicates the type of stickiness. The possible values are:\n\n- `lb_cookie` and `app_cookie` for Application Load Balancers.\n- `source_ip` for Network Load Balancers.\n- `source_ip_dest_ip` and `source_ip_dest_ip_proto` for Gateway Load Balancers.\n\nThe following attributes are supported by Application Load Balancers and Network Load Balancers:\n\n- `load_balancing.cross_zone.enabled` - Indicates whether cross zone load balancing is enabled. The value is `true` , `false` or `use_load_balancer_configuration` . The default is `use_load_balancer_configuration` .\n- `target_group_health.dns_failover.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to the maximum number of targets. The default is `off` .\n- `target_group_health.dns_failover.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are 1 to the maximum number of targets. The default is 1.\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n\nThe following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:\n\n- `load_balancing.algorithm.type` - The load balancing algorithm determines how the load balancer selects targets when routing requests. The value is `round_robin` , `least_outstanding_requests` , or `weighted_random` . 
The default is `round_robin` .\n- `load_balancing.algorithm.anomaly_mitigation` - Only available when `load_balancing.algorithm.type` is `weighted_random` . Indicates whether anomaly mitigation is enabled. The value is `on` or `off` . The default is `off` .\n- `slow_start.duration_seconds` - The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). The default is 0 seconds (disabled).\n- `stickiness.app_cookie.cookie_name` - Indicates the name of the application-based cookie. Names that start with the following prefixes are not allowed: `AWSALB` , `AWSALBAPP` , and `AWSALBTG` ; they're reserved for use by the load balancer.\n- `stickiness.app_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the application-based cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n- `stickiness.lb_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n\nThe following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:\n\n- `lambda.multi_value_headers.enabled` - Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is `true` or `false` . The default is `false` . If the value is `false` and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.\n\nThe following attributes are supported only by Network Load Balancers:\n\n- `deregistration_delay.connection_termination.enabled` - Indicates whether the load balancer terminates connections at the end of the deregistration timeout. The value is `true` or `false` . For new UDP/TCP_UDP target groups the default is `true` . Otherwise, the default is `false` .\n- `preserve_client_ip.enabled` - Indicates whether client IP preservation is enabled. The value is `true` or `false` . The default is disabled if the target group type is IP address and the target group protocol is TCP or TLS. Otherwise, the default is enabled. Client IP preservation cannot be disabled for UDP and TCP_UDP target groups.\n- `proxy_protocol_v2.enabled` - Indicates whether Proxy Protocol version 2 is enabled. The value is `true` or `false` . The default is `false` .\n- `target_health_state.unhealthy.connection_termination.enabled` - Indicates whether the load balancer terminates connections to unhealthy targets. The value is `true` or `false` . The default is `true` .\n- `target_health_state.unhealthy.draining_interval_seconds` - The amount of time for Elastic Load Balancing to wait before changing the state of an unhealthy target from `unhealthy.draining` to `unhealthy` . The range is 0-360000 seconds. 
The default value is 0 seconds.\n\nNote: This attribute can only be configured when `target_health_state.unhealthy.connection_termination.enabled` is `false` .\n\nThe following attributes are supported only by Gateway Load Balancers:\n\n- `target_failover.on_deregistration` - Indicates how the Gateway Load Balancer handles existing flows when a target is deregistered. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) can't be set independently. The value you set for both attributes must be the same.\n- `target_failover.on_unhealthy` - Indicates how the Gateway Load Balancer handles existing flows when a target is unhealthy. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) cannot be set independently. The value you set for both attributes must be the same.", "title": "Key", "type": "string" }, @@ -92867,7 +92861,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .", + "markdownDescription": "A list of `Tag` values, with a maximum of 50 elements.", "title": "Tags", "type": "array" } @@ -93029,7 +93023,7 @@ "type": "array" }, "StorageCapacity": { - "markdownDescription": "Sets the storage capacity of the file system that you're creating.\n\n`StorageCapacity` is required if you are creating a new file system.\n\n*FSx for Lustre file systems* - The amount of storage capacity that you can configure depends on the value that you set for `StorageType` and the Lustre `DeploymentType` , as follows:\n\n- For `SCRATCH_2` , `PERSISTENT_2` and `PERSISTENT_1` deployment types using SSD storage type, the valid values are 1200 GiB, 2400 GiB, and increments of 2400 GiB.\n- For `PERSISTENT_1` HDD file systems, valid values are increments of 6000 GiB for 12 MB/s/TiB file systems and increments of 1800 GiB for 40 MB/s/TiB file systems.\n- For `SCRATCH_1` deployment type, valid values are 1200 GiB, 2400 GiB, and increments of 3600 GiB.\n\n*FSx for ONTAP file systems* - The amount of storage capacity that you can configure is from 1024 GiB up to 196,608 GiB (192 TiB).\n\n*FSx for OpenZFS file systems* - The amount of storage capacity that you can configure is from 64 GiB up to 524,288 GiB (512 TiB). If you are creating a file system from a backup, you can specify a storage capacity equal to or greater than the original file system's storage capacity.\n\n*FSx for Windows File Server file systems* - The amount of storage capacity that you can configure depends on the value that you set for `StorageType` as follows:\n\n- For SSD storage, valid values are 32 GiB-65,536 GiB (64 TiB).\n- For HDD storage, valid values are 2000 GiB-65,536 GiB (64 TiB).", + "markdownDescription": "Sets the storage capacity of the file system that you're creating.\n\n`StorageCapacity` is required if you are creating a new file system. 
It is not required if you are creating a file system by restoring a backup.\n\n*FSx for Lustre file systems* - The amount of storage capacity that you can configure depends on the value that you set for `StorageType` and the Lustre `DeploymentType` , as follows:\n\n- For `SCRATCH_2` , `PERSISTENT_2` and `PERSISTENT_1` deployment types using SSD storage type, the valid values are 1200 GiB, 2400 GiB, and increments of 2400 GiB.\n- For `PERSISTENT_1` HDD file systems, valid values are increments of 6000 GiB for 12 MB/s/TiB file systems and increments of 1800 GiB for 40 MB/s/TiB file systems.\n- For `SCRATCH_1` deployment type, valid values are 1200 GiB, 2400 GiB, and increments of 3600 GiB.\n\n*FSx for ONTAP file systems* - The amount of storage capacity that you can configure is from 1024 GiB up to 196,608 GiB (192 TiB).\n\n*FSx for OpenZFS file systems* - The amount of storage capacity that you can configure is from 64 GiB up to 524,288 GiB (512 TiB). If you are creating a file system from a backup, you can specify a storage capacity equal to or greater than the original file system's storage capacity.\n\n*FSx for Windows File Server file systems* - The amount of storage capacity that you can configure depends on the value that you set for `StorageType` as follows:\n\n- For SSD storage, valid values are 32 GiB-65,536 GiB (64 TiB).\n- For HDD storage, valid values are 2000 GiB-65,536 GiB (64 TiB).", "title": "StorageCapacity", "type": "number" }, @@ -93050,7 +93044,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .", + "markdownDescription": "The tags to associate with the file system. For more information, see [Tagging your Amazon FSx resources](https://docs.aws.amazon.com/fsx/latest/LustreGuide/tag-resources.html) in the *Amazon FSx for Lustre User Guide* .", "title": "Tags", "type": "array" }, @@ -93161,7 +93155,7 @@ "type": "number" }, "CopyTagsToBackups": { - "markdownDescription": "A Boolean flag indicating whether tags for the file system should be copied to backups. This value defaults to false. If it's set to true, all tags for the file system are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value. Only valid for use with `PERSISTENT_1` deployment types.", + "markdownDescription": "(Optional) Not available for use with file systems that are linked to a data repository. A boolean flag indicating whether tags for the file system should be copied to backups. The default value is false. If `CopyTagsToBackups` is set to true, all file system tags are copied to all automatic and user-initiated backups when the user doesn't specify any backup-specific tags. If `CopyTagsToBackups` is set to true and you specify one or more backup tags, only the specified tags are copied to backups. 
If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.\n\n(Default = `false` )\n\nFor more information, see [Working with backups](https://docs.aws.amazon.com/fsx/latest/LustreGuide/using-backups-fsx.html) in the *Amazon FSx for Lustre User Guide* .", "title": "CopyTagsToBackups", "type": "boolean" }, @@ -93261,7 +93255,7 @@ "type": "string" }, "HAPairs": { - "markdownDescription": "", + "markdownDescription": "Specifies how many high-availability (HA) pairs of file servers will power your file system. Scale-up file systems are powered by 1 HA pair. The default value is 1. FSx for ONTAP scale-out file systems are powered by up to six HA pairs. The value of this property affects the values of `StorageCapacity` , `Iops` , and `ThroughputCapacity` . For more information, see [High-availability (HA) pairs](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/HA-pairs.html) in the FSx for ONTAP user guide.\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `HAPairs` is less than 1 or greater than 6.\n- The value of `HAPairs` is greater than 1 and the value of `DeploymentType` is `SINGLE_AZ_1` or `MULTI_AZ_1` .", "title": "HAPairs", "type": "number" }, @@ -93284,7 +93278,7 @@ "type": "number" }, "ThroughputCapacityPerHAPair": { - "markdownDescription": "", + "markdownDescription": "Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.\n\nYou can define either the `ThroughputCapacityPerHAPair` or the `ThroughputCapacity` when creating a file system, but not both.\n\nThis field and `ThroughputCapacity` are the same for scale-up file systems powered by one HA pair.\n\n- For `SINGLE_AZ_1` and `MULTI_AZ_1` file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.\n- For `SINGLE_AZ_2` file systems, valid values are 3072 or 6144 MBps.\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `ThroughputCapacity` and `ThroughputCapacityPerHAPair` are not the same value for file systems with one HA pair.\n- The value of deployment type is `SINGLE_AZ_2` and `ThroughputCapacity` / `ThroughputCapacityPerHAPair` is a valid HA pair (a value between 2 and 6).\n- The value of `ThroughputCapacityPerHAPair` is not a valid value.", "title": "ThroughputCapacityPerHAPair", "type": "number" }, @@ -93597,7 +93591,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .", + "markdownDescription": "A list of `Tag` values, with a maximum of 50 elements.", "title": "Tags", "type": "array" }, @@ -93685,7 +93679,7 @@ "type": "string" }, "RootVolumeSecurityStyle": { - "markdownDescription": "The security style of the root volume of the SVM. 
Specify one of the following values:\n\n- `UNIX` if the file system is managed by a UNIX administrator, the majority of users are NFS clients, and an application accessing the data uses a UNIX user as the service account.\n- `NTFS` if the file system is managed by a Windows administrator, the majority of users are SMB clients, and an application accessing the data uses a Windows user as the service account.\n- `MIXED` if the file system is managed by both UNIX and Windows administrators and users consist of both NFS and SMB clients.", + "markdownDescription": "The security style of the root volume of the SVM. Specify one of the following values:\n\n- `UNIX` if the file system is managed by a UNIX administrator, the majority of users are NFS clients, and an application accessing the data uses a UNIX user as the service account.\n- `NTFS` if the file system is managed by a Microsoft Windows administrator, the majority of users are SMB clients, and an application accessing the data uses a Microsoft Windows user as the service account.\n- `MIXED` This is an advanced setting. For more information, see [Volume security style](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/volume-security-style.html) in the Amazon FSx for NetApp ONTAP User Guide.", "title": "RootVolumeSecurityStyle", "type": "string" }, @@ -93698,7 +93692,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .", + "markdownDescription": "A list of `Tag` values, with a maximum of 50 elements.", "title": "Tags", "type": "array" } @@ -93740,7 +93734,7 @@ }, "SelfManagedActiveDirectoryConfiguration": { "$ref": "#/definitions/AWS::FSx::StorageVirtualMachine.SelfManagedActiveDirectoryConfiguration", - "markdownDescription": "The configuration that Amazon FSx uses to join the ONTAP storage virtual machine (SVM) to your self-managed (including on-premises) Microsoft Active Directory (AD) directory.", + "markdownDescription": "The configuration that Amazon FSx uses to join the ONTAP storage virtual machine (SVM) to your self-managed (including on-premises) Microsoft Active Directory directory.", "title": "SelfManagedActiveDirectoryConfiguration" } }, @@ -109747,7 +109741,7 @@ }, "Tags": { "additionalProperties": true, - "markdownDescription": "The tags of the image.", + "markdownDescription": "The tags that apply to this image.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -110640,12 +110634,12 @@ "type": "string" }, "ExecutionRole": { - "markdownDescription": "The name or Amazon Resource Name (ARN) of the IAM role that Image Builder uses to run the lifecycle policy. 
This is a custom role that you create.", + "markdownDescription": "The name or Amazon Resource Name (ARN) for the IAM role you create that grants Image Builder access to run lifecycle actions.", "title": "ExecutionRole", "type": "string" }, "Name": { - "markdownDescription": "The name of the lifecycle policy.", + "markdownDescription": "The name of the lifecycle policy to create.", "title": "Name", "type": "string" }, @@ -110653,17 +110647,17 @@ "items": { "$ref": "#/definitions/AWS::ImageBuilder::LifecyclePolicy.PolicyDetail" }, - "markdownDescription": "The configuration details for a lifecycle policy resource.", + "markdownDescription": "Configuration details for the lifecycle policy rules.", "title": "PolicyDetails", "type": "array" }, "ResourceSelection": { "$ref": "#/definitions/AWS::ImageBuilder::LifecyclePolicy.ResourceSelection", - "markdownDescription": "Resource selection criteria used to run the lifecycle policy.", + "markdownDescription": "Selection criteria for the resources that the lifecycle policy applies to.", "title": "ResourceSelection" }, "ResourceType": { - "markdownDescription": "The type of resources the lifecycle policy targets.", + "markdownDescription": "The type of Image Builder resource that the lifecycle policy applies to.", "title": "ResourceType", "type": "string" }, @@ -110674,7 +110668,7 @@ }, "Tags": { "additionalProperties": true, - "markdownDescription": "To help manage your lifecycle policy resources, you can assign your own metadata to each resource in the form of tags. Each tag consists of a key and an optional value, both of which you define.", + "markdownDescription": "Tags to apply to the lifecycle policy resource.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" } }, @@ -110719,11 +110713,11 @@ "properties": { "IncludeResources": { "$ref": "#/definitions/AWS::ImageBuilder::LifecyclePolicy.IncludeResources", - "markdownDescription": "", + "markdownDescription": "Specifies the resources that the lifecycle policy applies to.", "title": "IncludeResources" }, "Type": { - "markdownDescription": "", + "markdownDescription": "Specifies the lifecycle action to take.", "title": "Type", "type": "string" } @@ -110737,20 +110731,20 @@ "additionalProperties": false, "properties": { "IsPublic": { - "markdownDescription": "", + "markdownDescription": "Configures whether public AMIs are excluded from the lifecycle action.", "title": "IsPublic", "type": "boolean" }, "LastLaunched": { "$ref": "#/definitions/AWS::ImageBuilder::LifecyclePolicy.LastLaunched", - "markdownDescription": "", + "markdownDescription": "Specifies configuration details for Image Builder to exclude the most recent resources from lifecycle actions.", "title": "LastLaunched" }, "Regions": { "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "Configures AWS Regions that are excluded from the lifecycle action.", "title": "Regions", "type": "array" }, "SharedAccounts": { "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "Specifies AWS accounts whose resources are excluded from the lifecycle action.", "title": "SharedAccounts", "type": "array" }, "TagMap": { "additionalProperties": true, - "markdownDescription": "", + "markdownDescription": "Lists tags that should be excluded from lifecycle actions for the AMIs that have them.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" } }, @@ -110781,12 +110775,12 @@ "properties": { "Amis": { "$ref": 
"#/definitions/AWS::ImageBuilder::LifecyclePolicy.AmiExclusionRules", - "markdownDescription": "", + "markdownDescription": "Lists configuration values that apply to AMIs that Image Builder should exclude from the lifecycle action.", "title": "Amis" }, "TagMap": { "additionalProperties": true, - "markdownDescription": "", + "markdownDescription": "Contains a list of tags that Image Builder uses to skip lifecycle actions for Image Builder image resources that have them.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -110802,22 +110796,22 @@ "additionalProperties": false, "properties": { "RetainAtLeast": { - "markdownDescription": "", + "markdownDescription": "For age-based filters, this is the number of resources to keep on hand after the lifecycle `DELETE` action is applied. Impacted resources are only deleted if you have more than this number of resources. If you have fewer resources than this number, the impacted resource is not deleted.", "title": "RetainAtLeast", "type": "number" }, "Type": { - "markdownDescription": "", + "markdownDescription": "Filter resources based on either `age` or `count` .", "title": "Type", "type": "string" }, "Unit": { - "markdownDescription": "", + "markdownDescription": "Defines the unit of time that the lifecycle policy uses to determine impacted resources. This is required for age-based rules.", "title": "Unit", "type": "string" }, "Value": { - "markdownDescription": "", + "markdownDescription": "The number of units for the time period or for the count. For example, a value of `6` might refer to six months or six AMIs.\n\n> For count-based filters, this value represents the minimum number of resources to keep on hand. If you have fewer resources than this number, the resource is excluded from lifecycle actions.", "title": "Value", "type": "number" } @@ -110832,17 +110826,17 @@ "additionalProperties": false, "properties": { "Amis": { - "markdownDescription": "", + "markdownDescription": "Specifies whether the lifecycle action should apply to distributed AMIs.", "title": "Amis", "type": "boolean" }, "Containers": { - "markdownDescription": "", + "markdownDescription": "Specifies whether the lifecycle action should apply to distributed containers.", "title": "Containers", "type": "boolean" }, "Snapshots": { - "markdownDescription": "", + "markdownDescription": "Specifies whether the lifecycle action should apply to snapshots associated with distributed AMIs.", "title": "Snapshots", "type": "boolean" } @@ -110853,12 +110847,12 @@ "additionalProperties": false, "properties": { "Unit": { - "markdownDescription": "", + "markdownDescription": "Defines the unit of time that the lifecycle policy uses to calculate elapsed time since the last instance launched from the AMI. For example: days, weeks, months, or years.", "title": "Unit", "type": "string" }, "Value": { - "markdownDescription": "", + "markdownDescription": "The integer number of units for the time period. 
For example `6` (months).", "title": "Value", "type": "number" } @@ -110874,17 +110868,17 @@ "properties": { "Action": { "$ref": "#/definitions/AWS::ImageBuilder::LifecyclePolicy.Action", - "markdownDescription": "", + "markdownDescription": "Configuration details for the policy action.", "title": "Action" }, "ExclusionRules": { "$ref": "#/definitions/AWS::ImageBuilder::LifecyclePolicy.ExclusionRules", - "markdownDescription": "", + "markdownDescription": "Additional rules to specify resources that should be exempt from policy actions.", "title": "ExclusionRules" }, "Filter": { "$ref": "#/definitions/AWS::ImageBuilder::LifecyclePolicy.Filter", - "markdownDescription": "", + "markdownDescription": "Specifies the resources that the lifecycle policy applies to.", "title": "Filter" } }, @@ -110898,12 +110892,12 @@ "additionalProperties": false, "properties": { "Name": { - "markdownDescription": "", + "markdownDescription": "The name of an Image Builder recipe that the lifecycle policy uses for resource selection.", "title": "Name", "type": "string" }, "SemanticVersion": { - "markdownDescription": "", + "markdownDescription": "The version of the Image Builder recipe specified by the `name` field.", "title": "SemanticVersion", "type": "string" } @@ -110920,13 +110914,13 @@ "items": { "$ref": "#/definitions/AWS::ImageBuilder::LifecyclePolicy.RecipeSelection" }, - "markdownDescription": "", + "markdownDescription": "A list of recipes that are used as selection criteria for the output images that the lifecycle policy applies to.", "title": "Recipes", "type": "array" }, "TagMap": { "additionalProperties": true, - "markdownDescription": "", + "markdownDescription": "A list of tags that are used as selection criteria for the Image Builder image resources that the lifecycle policy applies to.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -110979,28 +110973,28 @@ "type": "string" }, "Data": { - "markdownDescription": "Contains the YAML document content for the workflow.", + "markdownDescription": "Contains the UTF-8 encoded YAML document content for the workflow. Alternatively, you can specify the `uri` of a YAML document file stored in Amazon S3. However, you cannot specify both properties.", "title": "Data", "type": "string" }, "Description": { - "markdownDescription": "The description of the workflow.", + "markdownDescription": "Describes the workflow.", "title": "Description", "type": "string" }, "KmsKeyId": { - "markdownDescription": "The KMS key identifier used to encrypt the workflow resource.", + "markdownDescription": "The ID of the KMS key that is used to encrypt this workflow resource.", "title": "KmsKeyId", "type": "string" }, "Name": { - "markdownDescription": "The name of the workflow resource.", + "markdownDescription": "The name of the workflow to create.", "title": "Name", "type": "string" }, "Tags": { "additionalProperties": true, - "markdownDescription": "The tags that apply to the workflow resource", + "markdownDescription": "Tags that apply to the workflow resource.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -111010,17 +111004,17 @@ "type": "object" }, "Type": { - "markdownDescription": "Specifies the image creation stage that the workflow applies to. 
Image Builder currently supports build and test workflows.", + "markdownDescription": "The phase in the image build process for which the workflow resource is responsible.", "title": "Type", "type": "string" }, "Uri": { - "markdownDescription": "", + "markdownDescription": "The `uri` of a YAML component document file. This must be an S3 URL ( `s3://bucket/key` ), and the requester must have permission to access the S3 bucket it points to. If you use Amazon S3, you can specify component content up to your service quota.\n\nAlternatively, you can specify the YAML document inline, using the component `data` property. You cannot specify both properties.", "title": "Uri", "type": "string" }, "Version": { - "markdownDescription": "The workflow resource version. Workflow resources are immutable. To make a change, you can clone a workflow or create a new version.", + "markdownDescription": "The semantic version of this workflow resource. The semantic version syntax adheres to the following rules.\n\n> The semantic version has four nodes: `<major>.<minor>.<patch>/<build>` . You can assign values for the first three, and can filter on all of them.\n> \n> *Assignment:* For the first three nodes you can assign any positive integer value, including zero, with an upper limit of 2^30-1, or 1073741823 for each node. Image Builder automatically assigns the build number to the fourth node.\n> \n> *Patterns:* You can use any numeric pattern that adheres to the assignment requirements for the nodes that you can assign. For example, you might choose a software version pattern, such as 1.0.0, or a date, such as 2021.01.01.", "title": "Version", "type": "string" } }, @@ -124303,7 +124297,7 @@ }, "SidewalkResponse": { "$ref": "#/definitions/AWS::IoTWireless::PartnerAccount.SidewalkAccountInfoWithFingerprint", - "markdownDescription": "", + "markdownDescription": "Information about a Sidewalk account.", "title": "SidewalkResponse" }, "SidewalkUpdate": { @@ -124854,13 +124848,13 @@ "additionalProperties": false, "properties": { "DevAddr": { - "markdownDescription": "", + "markdownDescription": "The DevAddr value.", "title": "DevAddr", "type": "string" }, "SessionKeys": { "$ref": "#/definitions/AWS::IoTWireless::WirelessDevice.SessionKeysAbpV10x", - "markdownDescription": "", + "markdownDescription": "Session keys for ABP v1.0.x.", "title": "SessionKeys" } }, @@ -124895,7 +124889,7 @@ "properties": { "AbpV10x": { "$ref": "#/definitions/AWS::IoTWireless::WirelessDevice.AbpV10x", - "markdownDescription": "", + "markdownDescription": "ABP device object for LoRaWAN specification v1.0.x.", "title": "AbpV10x" }, "AbpV11": { @@ -124935,12 +124929,12 @@ "additionalProperties": false, "properties": { "AppEui": { - "markdownDescription": "", + "markdownDescription": "The AppEUI value. 
You specify this value when using LoRaWAN versions v1.0.2 or v1.0.3.", "title": "AppEui", "type": "string" }, "AppKey": { - "markdownDescription": "", + "markdownDescription": "The AppKey value.", "title": "AppKey", "type": "string" } @@ -124981,12 +124975,12 @@ "additionalProperties": false, "properties": { "AppSKey": { - "markdownDescription": "", + "markdownDescription": "The AppSKey value.", "title": "AppSKey", "type": "string" }, "NwkSKey": { - "markdownDescription": "", + "markdownDescription": "The NwkSKey value.", "title": "NwkSKey", "type": "string" } @@ -130740,16 +130734,16 @@ }, "RetryOptions": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.AmazonOpenSearchServerlessRetryOptions", - "markdownDescription": "The retry behavior in case Kinesis Data Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service. The default value is 300 (5 minutes).", + "markdownDescription": "The retry behavior in case Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service. The default value is 300 (5 minutes).", "title": "RetryOptions" }, "RoleARN": { - "markdownDescription": "The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Serverless offering for Amazon OpenSearch Service Configuration API and for indexing documents.", + "markdownDescription": "The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Serverless offering for Amazon OpenSearch Service Configuration API and for indexing documents.", "title": "RoleARN", "type": "string" }, "S3BackupMode": { - "markdownDescription": "Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Kinesis Data Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix.", + "markdownDescription": "Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix.", "title": "S3BackupMode", "type": "string" }, @@ -130775,7 +130769,7 @@ "additionalProperties": false, "properties": { "DurationInSeconds": { - "markdownDescription": "After an initial failure to deliver to the Serverless offering for Amazon OpenSearch Service, the total amount of time during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.", + "markdownDescription": "After an initial failure to deliver to the Serverless offering for Amazon OpenSearch Service, the total amount of time during which Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). 
A value of 0 (zero) results in no retries.", "title": "DurationInSeconds", "type": "number" } @@ -130818,7 +130812,7 @@ }, "DocumentIdOptions": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.DocumentIdOptions", - "markdownDescription": "Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.", + "markdownDescription": "Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.", "title": "DocumentIdOptions" }, "DomainARN": { @@ -130981,12 +130975,12 @@ }, "InputFormatConfiguration": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.InputFormatConfiguration", - "markdownDescription": "Specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. This parameter is required if `Enabled` is set to true.", + "markdownDescription": "Specifies the deserializer that you want Firehose to use to convert the format of your data from JSON. This parameter is required if `Enabled` is set to true.", "title": "InputFormatConfiguration" }, "OutputFormatConfiguration": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.OutputFormatConfiguration", - "markdownDescription": "Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. This parameter is required if `Enabled` is set to true.", + "markdownDescription": "Specifies the serializer that you want Firehose to use to convert the format of your data to the Parquet or ORC format. This parameter is required if `Enabled` is set to true.", "title": "OutputFormatConfiguration" }, "SchemaConfiguration": { @@ -131001,7 +130995,7 @@ "additionalProperties": false, "properties": { "KeyARN": { - "markdownDescription": "If you set `KeyType` to `CUSTOMER_MANAGED_CMK` , you must specify the Amazon Resource Name (ARN) of the CMK. If you set `KeyType` to `AWS _OWNED_CMK` , Kinesis Data Firehose uses a service-account CMK.", + "markdownDescription": "If you set `KeyType` to `CUSTOMER_MANAGED_CMK` , you must specify the Amazon Resource Name (ARN) of the CMK. If you set `KeyType` to `AWS_OWNED_CMK` , Firehose uses a service-account CMK.", "title": "KeyARN", "type": "string" }, @@ -131021,12 +131015,12 @@ "properties": { "HiveJsonSerDe": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.HiveJsonSerDe", - "markdownDescription": "The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.", + "markdownDescription": "The native Hive / HCatalog JsonSerDe. Used by Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.", "title": "HiveJsonSerDe" }, "OpenXJsonSerDe": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.OpenXJsonSerDe", - "markdownDescription": "The OpenX SerDe. 
Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.", + "markdownDescription": "The OpenX SerDe. Used by Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.", "title": "OpenXJsonSerDe" } }, @@ -131036,7 +131030,7 @@ "additionalProperties": false, "properties": { "DefaultDocumentIdFormat": { - "markdownDescription": "When the `FIREHOSE_DEFAULT` option is chosen, Kinesis Data Firehose generates a unique document ID for each record based on a unique internal identifier. The generated document ID is stable across multiple delivery attempts, which helps prevent the same record from being indexed multiple times with different document IDs.\n\nWhen the `NO_DOCUMENT_ID` option is chosen, Kinesis Data Firehose does not include any document IDs in the requests it sends to the Amazon OpenSearch Service. This causes the Amazon OpenSearch Service domain to generate document IDs. In case of multiple delivery attempts, this may cause the same record to be indexed more than once with different document IDs. This option enables write-heavy operations, such as the ingestion of logs and observability data, to consume less resources in the Amazon OpenSearch Service domain, resulting in improved performance.", + "markdownDescription": "When the `FIREHOSE_DEFAULT` option is chosen, Firehose generates a unique document ID for each record based on a unique internal identifier. The generated document ID is stable across multiple delivery attempts, which helps prevent the same record from being indexed multiple times with different document IDs.\n\nWhen the `NO_DOCUMENT_ID` option is chosen, Firehose does not include any document IDs in the requests it sends to the Amazon OpenSearch Service. This causes the Amazon OpenSearch Service domain to generate document IDs. In case of multiple delivery attempts, this may cause the same record to be indexed more than once with different document IDs. This option enables write-heavy operations, such as the ingestion of logs and observability data, to consume less resources in the Amazon OpenSearch Service domain, resulting in improved performance.", "title": "DefaultDocumentIdFormat", "type": "string" } @@ -131098,7 +131092,7 @@ }, "DocumentIdOptions": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.DocumentIdOptions", - "markdownDescription": "Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.", + "markdownDescription": "Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.", "title": "DocumentIdOptions" }, "DomainARN": { @@ -131268,7 +131262,7 @@ "items": { "type": "string" }, - "markdownDescription": "Indicates how you want Kinesis Data Firehose to parse the date and timestamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. 
For more information, see [Class DateTimeFormat](https://docs.aws.amazon.com/https://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html) . You can also use the special value `millis` to parse timestamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses `java.sql.Timestamp::valueOf` by default.", + "markdownDescription": "Indicates how you want Firehose to parse the date and timestamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see [Class DateTimeFormat](https://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html) . You can also use the special value `millis` to parse timestamps in epoch milliseconds. If you don't specify a format, Firehose uses `java.sql.Timestamp::valueOf` by default.", "title": "TimestampFormats", "type": "array" } @@ -131468,7 +131462,7 @@ "additionalProperties": false, "properties": { "CaseInsensitive": { - "markdownDescription": "When set to `true` , which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.", + "markdownDescription": "When set to `true` , which is the default, Firehose converts JSON keys to lowercase before deserializing them.", "title": "CaseInsensitive", "type": "boolean" }, "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" } }, "type": "object" }, "ConvertDotsInJsonKeysToUnderscores": { - "markdownDescription": "When set to `true` , specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is \"a.b\", you can define the column name to be \"a_b\" when using this option.\n\nThe default is `false` .", + "markdownDescription": "When set to `true` , specifies that the names of the keys include dots and that you want Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is \"a.b\", you can define the column name to be \"a_b\" when using this option.\n\nThe default is `false` .", "title": "ConvertDotsInJsonKeysToUnderscores", "type": "boolean" } @@ -131495,7 +131489,7 @@ "additionalProperties": false, "properties": { "BlockSizeBytes": { - "markdownDescription": "The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.", + "markdownDescription": "The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Firehose uses this value for padding calculations.", "title": "BlockSizeBytes", "type": "number" }, @@ -131503,7 +131497,7 @@ "items": { "type": "string" }, - "markdownDescription": "The column names for which you want Kinesis Data Firehose to create bloom filters. The default is `null` .", + "markdownDescription": "The column names for which you want Firehose to create bloom filters. 
The default is `null` .", "title": "BloomFilterColumns", "type": "array" }, @@ -131565,7 +131559,7 @@ "additionalProperties": false, "properties": { "BlockSizeBytes": { - "markdownDescription": "The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.", + "markdownDescription": "The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Firehose uses this value for padding calculations.", "title": "BlockSizeBytes", "type": "number" }, @@ -131688,7 +131682,7 @@ }, "RetryOptions": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.RedshiftRetryOptions", - "markdownDescription": "The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).", + "markdownDescription": "The retry behavior in case Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).", "title": "RetryOptions" }, "RoleARN": { @@ -131731,7 +131725,7 @@ "additionalProperties": false, "properties": { "DurationInSeconds": { - "markdownDescription": "The length of time during which Kinesis Data Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Kinesis Data Firehose does not retry if the value of `DurationInSeconds` is 0 (zero) or if the first delivery attempt takes longer than the current value.", + "markdownDescription": "The length of time during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Firehose does not retry if the value of `DurationInSeconds` is 0 (zero) or if the first delivery attempt takes longer than the current value.", "title": "DurationInSeconds", "type": "number" } @@ -131818,7 +131812,7 @@ "type": "string" }, "RoleARN": { - "markdownDescription": "The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed.\n\n> If the `SchemaConfiguration` request parameter is used as part of invoking the `CreateDeliveryStream` API, then the `RoleARN` property is required and its value must be specified.", + "markdownDescription": "The role that Firehose can use to access AWS Glue. This role must be in the same account you use for Firehose. Cross-account roles aren't allowed.\n\n> If the `SchemaConfiguration` request parameter is used as part of invoking the `CreateDeliveryStream` API, then the `RoleARN` property is required and its value must be specified.", "title": "RoleARN", "type": "string" }, @@ -131828,7 +131822,7 @@ "type": "string" }, "VersionId": { - "markdownDescription": "Specifies the table version for the output data schema. If you don't specify this version ID, or if you set it to `LATEST` , Kinesis Data Firehose uses the most recent version. This means that any updates to the table are automatically picked up.", + "markdownDescription": "Specifies the table version for the output data schema. If you don't specify this version ID, or if you set it to `LATEST` , Firehose uses the most recent version. 
This means that any updates to the table are automatically picked up.", "title": "VersionId", "type": "string" } @@ -131881,12 +131875,12 @@ "title": "CloudWatchLoggingOptions" }, "HECAcknowledgmentTimeoutInSeconds": { - "markdownDescription": "The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.", + "markdownDescription": "The amount of time that Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Firehose either tries to send the data again or considers it an error, based on your retry settings.", "title": "HECAcknowledgmentTimeoutInSeconds", "type": "number" }, "HECEndpoint": { - "markdownDescription": "The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.", + "markdownDescription": "The HTTP Event Collector (HEC) endpoint to which Firehose sends your data.", "title": "HECEndpoint", "type": "string" }, @@ -131907,11 +131901,11 @@ }, "RetryOptions": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.SplunkRetryOptions", - "markdownDescription": "The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk, or if it doesn't receive an acknowledgment of receipt from Splunk.", + "markdownDescription": "The retry behavior in case Firehose is unable to deliver data to Splunk, or if it doesn't receive an acknowledgment of receipt from Splunk.", "title": "RetryOptions" }, "S3BackupMode": { - "markdownDescription": "Defines how documents should be delivered to Amazon S3. When set to `FailedEventsOnly` , Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to `AllEvents` , Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. The default value is `FailedEventsOnly` .\n\nYou can update this backup mode from `FailedEventsOnly` to `AllEvents` . You can't update it from `AllEvents` to `FailedEventsOnly` .", + "markdownDescription": "Defines how documents should be delivered to Amazon S3. When set to `FailedEventsOnly` , Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to `AllEvents` , Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. The default value is `FailedEventsOnly` .\n\nYou can update this backup mode from `FailedEventsOnly` to `AllEvents` . You can't update it from `AllEvents` to `FailedEventsOnly` .", "title": "S3BackupMode", "type": "string" }, @@ -131933,7 +131927,7 @@ "additionalProperties": false, "properties": { "DurationInSeconds": { - "markdownDescription": "The total amount of time that Kinesis Data Firehose spends on retries. This duration starts after the initial attempt to send data to Splunk fails. It doesn't include the periods during which Kinesis Data Firehose waits for acknowledgment from Splunk after each attempt.", + "markdownDescription": "The total amount of time that Firehose spends on retries. This duration starts after the initial attempt to send data to Splunk fails. 
It doesn't include the periods during which Firehose waits for acknowledgment from Splunk after each attempt.", "title": "DurationInSeconds", "type": "number" } @@ -133811,7 +133805,7 @@ "additionalProperties": false, "properties": { "Destination": { - "markdownDescription": "The Amazon Resource Name (ARN) of the destination resource.\n\nTo retain records of [asynchronous invocations](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) , you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.\n\nTo retain records of failed invocations from [Kinesis and DynamoDB event sources](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html#event-source-mapping-destinations) , you can configure an Amazon SNS topic or Amazon SQS queue as the destination.\n\nTo retain records of failed invocations from [self-managed Kafka](https://docs.aws.amazon.com/lambda/latest/dg/with-kafka.html#services-smaa-onfailure-destination) or [Amazon MSK](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-onfailure-destination) , you can configure an Amazon SNS topic or Amazon SQS queue as the destination.", + "markdownDescription": "The Amazon Resource Name (ARN) of the destination resource.\n\nTo retain records of [asynchronous invocations](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) , you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.\n\nTo retain records of failed invocations from [Kinesis and DynamoDB event sources](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html#event-source-mapping-destinations) , you can configure an Amazon SNS topic or Amazon SQS queue as the destination.\n\nTo retain records of failed invocations from [self-managed Kafka](https://docs.aws.amazon.com/lambda/latest/dg/with-kafka.html#services-smaa-onfailure-destination) or [Amazon MSK](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-onfailure-destination) , you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination.", "title": "Destination", "type": "string" } @@ -133887,7 +133881,7 @@ }, "DestinationConfig": { "$ref": "#/definitions/AWS::Lambda::EventSourceMapping.DestinationConfig", - "markdownDescription": "(Kinesis and DynamoDB Streams only) An Amazon SQS queue or Amazon SNS topic destination for discarded records.", + "markdownDescription": "(Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Apache Kafka event sources only) A configuration object that specifies the destination of an event after Lambda processes it.", "title": "DestinationConfig" }, "DocumentDBEventSourceConfig": { @@ -134110,7 +134104,7 @@ "additionalProperties": false, "properties": { "Destination": { - "markdownDescription": "The Amazon Resource Name (ARN) of the destination resource.\n\nTo retain records of [asynchronous invocations](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) , you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.\n\nTo retain records of failed invocations from [Kinesis and DynamoDB event sources](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html#event-source-mapping-destinations) , you can configure an Amazon SNS topic 
or Amazon SQS queue as the destination.\n\nTo retain records of failed invocations from [self-managed Kafka](https://docs.aws.amazon.com/lambda/latest/dg/with-kafka.html#services-smaa-onfailure-destination) or [Amazon MSK](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-onfailure-destination) , you can configure an Amazon SNS topic or Amazon SQS queue as the destination.", + "markdownDescription": "The Amazon Resource Name (ARN) of the destination resource.\n\nTo retain records of [asynchronous invocations](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) , you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.\n\nTo retain records of failed invocations from [Kinesis and DynamoDB event sources](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html#event-source-mapping-destinations) , you can configure an Amazon SNS topic or Amazon SQS queue as the destination.\n\nTo retain records of failed invocations from [self-managed Kafka](https://docs.aws.amazon.com/lambda/latest/dg/with-kafka.html#services-smaa-onfailure-destination) or [Amazon MSK](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-onfailure-destination) , you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination.", "title": "Destination", "type": "string" } @@ -141677,7 +141671,7 @@ "type": "array" }, "Name": { - "markdownDescription": "A name for the query definition.", + "markdownDescription": "A name for the query definition.\n\n> You can use the name to create a folder structure for your queries. To create a folder, use a forward slash (/) to prefix your desired query name with your desired folder name. For example, `/ *folder-name* / *query-name*` .", "title": "Name", "type": "string" }, @@ -161737,12 +161731,12 @@ "properties": { "BufferOptions": { "$ref": "#/definitions/AWS::OSIS::Pipeline.BufferOptions", - "markdownDescription": "Options that specify the configuration of a persistent buffer. To configure how OpenSearch Ingestion encrypts this data, set the EncryptionAtRestOptions.", + "markdownDescription": "Options that specify the configuration of a persistent buffer. To configure how OpenSearch Ingestion encrypts this data, set the `EncryptionAtRestOptions` . For more information, see [Persistent buffering](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/osis-features-overview.html#persistent-buffering) .", "title": "BufferOptions" }, "EncryptionAtRestOptions": { "$ref": "#/definitions/AWS::OSIS::Pipeline.EncryptionAtRestOptions", - "markdownDescription": "Options to control how OpenSearch encrypts all data-at-rest.", + "markdownDescription": "Options to control how OpenSearch encrypts buffer data.", "title": "EncryptionAtRestOptions" }, "LogPublishingOptions": { @@ -161831,7 +161825,7 @@ "additionalProperties": false, "properties": { "LogGroup": { - "markdownDescription": "The name of the CloudWatch Logs group to send pipeline logs to. You can specify an existing log group or create a new one. For example, `/aws/OpenSearchService/IngestionService/my-pipeline` .", + "markdownDescription": "The name of the CloudWatch Logs group to send pipeline logs to. You can specify an existing log group or create a new one. 
For example, `/aws/vendedlogs/OpenSearchService/pipelines` .", "title": "LogGroup", "type": "string" } @@ -161845,7 +161839,7 @@ "additionalProperties": false, "properties": { "KmsKeyArn": { - "markdownDescription": "The ARN of the KMS key used to encrypt data-at-rest in OpenSearch Ingestion. By default, data is encrypted using an AWS owned key.", + "markdownDescription": "The ARN of the KMS key used to encrypt buffer data. By default, data is encrypted using an AWS owned key.", "title": "KmsKeyArn", "type": "string" } @@ -162971,7 +162965,7 @@ "type": "string" }, "StandbyReplicas": { - "markdownDescription": "Details about an OpenSearch Serverless collection.", + "markdownDescription": "Indicates whether standby replicas should be used for a collection.", "title": "StandbyReplicas", "type": "string" }, @@ -170292,7 +170286,7 @@ "type": "string" }, "DestinationStreamArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the Amazon Kinesis data stream or Amazon Kinesis Data Firehose delivery stream that you want to publish event data to.\n\nFor a Kinesis data stream, the ARN format is: `arn:aws:kinesis: region : account-id :stream/ stream_name`\n\nFor a Kinesis Data Firehose delivery stream, the ARN format is: `arn:aws:firehose: region : account-id :deliverystream/ stream_name`", + "markdownDescription": "The Amazon Resource Name (ARN) of the Amazon Kinesis Data Stream or Amazon Kinesis Data Firehose delivery stream that you want to publish event data to.\n\nFor a Kinesis Data Stream, the ARN format is: `arn:aws:kinesis: region : account-id :stream/ stream_name`\n\nFor a Kinesis Data Firehose delivery stream, the ARN format is: `arn:aws:firehose: region : account-id :deliverystream/ stream_name`", "title": "DestinationStreamArn", "type": "string" }, @@ -214795,12 +214789,12 @@ }, "ScalingConfiguration": { "$ref": "#/definitions/AWS::RDS::DBCluster.ScalingConfiguration", - "markdownDescription": "The `ScalingConfiguration` property type specifies the scaling configuration of an Aurora Serverless DB cluster.\n\nThis property is only supported for Aurora Serverless v1. For Aurora Serverless v2, use `ServerlessV2ScalingConfiguration` property.\n\nValid for: Aurora DB clusters only", + "markdownDescription": "The `ScalingConfiguration` property type specifies the scaling configuration of an Aurora Serverless DB cluster.\n\nThis property is only supported for Aurora Serverless v1. For Aurora Serverless v2, use the `ServerlessV2ScalingConfiguration` property.\n\nValid for: Aurora DB clusters only", "title": "ScalingConfiguration" }, "ServerlessV2ScalingConfiguration": { "$ref": "#/definitions/AWS::RDS::DBCluster.ServerlessV2ScalingConfiguration", - "markdownDescription": "The `ServerlessV2ScalingConfiguration` property type specifies the scaling configuration of an Aurora Serverless V2 DB cluster.\n\nThis property is only supported for Aurora Serverless v2. For Aurora Serverless v1, use `ScalingConfiguration` property.\n\nValid for: Aurora DB clusters only", + "markdownDescription": "The `ServerlessV2ScalingConfiguration` property type specifies the scaling configuration of an Aurora Serverless V2 DB cluster.\n\nThis property is only supported for Aurora Serverless v2. 
For Aurora Serverless v1, use the `ScalingConfiguration` property.\n\nValid for: Aurora DB clusters only", "title": "ServerlessV2ScalingConfiguration" }, "SnapshotIdentifier": { @@ -233881,7 +233875,7 @@ "properties": { "FileSystemConfig": { "$ref": "#/definitions/AWS::SageMaker::AppImageConfig.FileSystemConfig", - "markdownDescription": "The Amazon Elastic File System (EFS) storage configuration for a SageMaker image.", + "markdownDescription": "The Amazon Elastic File System storage configuration for a SageMaker image.", "title": "FileSystemConfig" }, "KernelSpecs": { @@ -234808,7 +234802,7 @@ }, "DefaultSpaceSettings": { "$ref": "#/definitions/AWS::SageMaker::Domain.DefaultSpaceSettings", - "markdownDescription": "A collection of settings that apply to spaces created in the Domain.", + "markdownDescription": "A collection of settings that apply to spaces created in the domain.", "title": "DefaultSpaceSettings" }, "DefaultUserSettings": { @@ -234888,7 +234882,7 @@ "properties": { "DefaultResourceSpec": { "$ref": "#/definitions/AWS::SageMaker::Domain.ResourceSpec", - "markdownDescription": "", + "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the Code Editor app.", "title": "DefaultResourceSpec" }, "LifecycleConfigArns": { @@ -235014,7 +235008,7 @@ "items": { "type": "string" }, - "markdownDescription": "The security group IDs for the Amazon Virtual Private Cloud that the space uses for communication.", + "markdownDescription": "The security group IDs for the Amazon VPC that the space uses for communication.", "title": "SecurityGroups", "type": "array" } @@ -235094,7 +235088,7 @@ }, "DefaultResourceSpec": { "$ref": "#/definitions/AWS::SageMaker::Domain.ResourceSpec", - "markdownDescription": "", + "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterLab app.", "title": "DefaultResourceSpec" }, "LifecycleConfigArns": { @@ -235132,7 +235126,7 @@ }, "DefaultResourceSpec": { "$ref": "#/definitions/AWS::SageMaker::Domain.ResourceSpec", - "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS Command Line Interface or AWS CloudFormation and the instance type parameter value is not passed.", + "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. 
The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.", "title": "DefaultResourceSpec" } }, @@ -239255,7 +239249,7 @@ }, "EndpointInput": { "$ref": "#/definitions/AWS::SageMaker::ModelExplainabilityJobDefinition.EndpointInput", - "markdownDescription": "", + "markdownDescription": "Input object for the endpoint.", "title": "EndpointInput" } }, @@ -242246,7 +242240,7 @@ "additionalProperties": false, "properties": { "DomainId": { - "markdownDescription": "The ID of the associated Domain.", + "markdownDescription": "The ID of the associated domain.", "title": "DomainId", "type": "string" }, @@ -242345,7 +242339,7 @@ }, "DefaultResourceSpec": { "$ref": "#/definitions/AWS::SageMaker::Space.ResourceSpec", - "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS Command Line Interface or AWS CloudFormation and the instance type parameter value is not passed.", + "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.", "title": "DefaultResourceSpec" } }, @@ -242489,7 +242483,7 @@ "properties": { "DefaultResourceSpec": { "$ref": "#/definitions/AWS::SageMaker::UserProfile.ResourceSpec", - "markdownDescription": "", + "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the Code Editor app.", "title": "DefaultResourceSpec" }, "LifecycleConfigArns": { @@ -242644,7 +242638,7 @@ }, "DefaultResourceSpec": { "$ref": "#/definitions/AWS::SageMaker::UserProfile.ResourceSpec", - "markdownDescription": "", + "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterLab app.", "title": "DefaultResourceSpec" }, "LifecycleConfigArns": { @@ -242682,7 +242676,7 @@ }, "DefaultResourceSpec": { "$ref": "#/definitions/AWS::SageMaker::UserProfile.ResourceSpec", - "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. 
The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.", "title": "DefaultResourceSpec" } }, @@ -242885,7 +242879,7 @@ "type": "array" }, "WorkforceName": { - "markdownDescription": "", + "markdownDescription": "The name of the workforce.", "title": "WorkforceName", "type": "string" }, @@ -259512,7 +259506,7 @@ "type": "array" }, "UserName": { - "markdownDescription": "The user name of the user for the WorkSpace. This user name must exist in the AWS Directory Service directory for the WorkSpace.", + "markdownDescription": "The user name of the user for the WorkSpace. This user name must exist in the AWS Directory Service directory for the WorkSpace.\n\nThe reserved keyword, `[UNDEFINED]` , is used when creating user-decoupled WorkSpaces.", "title": "UserName", "type": "string" }, @@ -259872,7 +259866,7 @@ "properties": { "IdentityProviderDetails": { "additionalProperties": true, - "markdownDescription": "The identity provider details. The following list describes the provider detail keys for each identity provider type.\n\n- For Google and Login with Amazon:\n\n- `client_id`\n- `client_secret`\n- `authorize_scopes`\n- For Facebook:\n\n- `client_id`\n- `client_secret`\n- `authorize_scopes`\n- `api_version`\n- For Sign in with Apple:\n\n- `client_id`\n- `team_id`\n- `key_id`\n- `private_key`\n- `authorize_scopes`\n- For OIDC providers:\n\n- `client_id`\n- `client_secret`\n- `attributes_request_method`\n- `oidc_issuer`\n- `authorize_scopes`\n- `authorize_url` *if not available from discovery URL specified by oidc_issuer key*\n- `token_url` *if not available from discovery URL specified by oidc_issuer key*\n- `attributes_url` *if not available from discovery URL specified by oidc_issuer key*\n- `jwks_uri` *if not available from discovery URL specified by oidc_issuer key*\n- For SAML providers:\n\n- `MetadataFile` OR `MetadataURL`\n- `IDPSignout` *optional*", + "markdownDescription": "The identity provider details. The following list describes the provider detail keys for each identity provider type.\n\n- For Google and Login with Amazon:\n\n- `client_id`\n- `client_secret`\n- `authorize_scopes`\n- For Facebook:\n\n- `client_id`\n- `client_secret`\n- `authorize_scopes`\n- `api_version`\n- For Sign in with Apple:\n\n- `client_id`\n- `team_id`\n- `key_id`\n- `private_key`\n- `authorize_scopes`\n- For OIDC providers:\n\n- `client_id`\n- `client_secret`\n- `attributes_request_method`\n- `oidc_issuer`\n- `authorize_scopes`\n- `authorize_url` *if not available from discovery URL specified by oidc_issuer key*\n- `token_url` *if not available from discovery URL specified by oidc_issuer key*\n- `attributes_url` *if not available from discovery URL specified by oidc_issuer key*\n- `jwks_uri` *if not available from discovery URL specified by oidc_issuer key*\n- For SAML providers:\n\n- `MetadataFile` OR `MetadataURL`\n- `IDPSignout` (boolean) *optional*\n- `IDPInit` (boolean) *optional*\n- `RequestSigningAlgorithm` (string) *optional* - Only accepts `rsa-sha256`\n- `EncryptedResponses` (boolean) *optional*", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string"