From ab7f8ca31514cb9263ee1595ba486efbb9d9bd92 Mon Sep 17 00:00:00 2001 From: AWS CDK Automation <43080478+aws-cdk-automation@users.noreply.github.com> Date: Thu, 5 May 2022 02:46:36 -0700 Subject: [PATCH] docs(cfnspec): update CloudFormation documentation (#20226) --- .../spec-source/cfn-docs/cfn-docs.json | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/packages/@aws-cdk/cfnspec/spec-source/cfn-docs/cfn-docs.json b/packages/@aws-cdk/cfnspec/spec-source/cfn-docs/cfn-docs.json index a3ca127d7bdab..f1e25d35359ed 100644 --- a/packages/@aws-cdk/cfnspec/spec-source/cfn-docs/cfn-docs.json +++ b/packages/@aws-cdk/cfnspec/spec-source/cfn-docs/cfn-docs.json @@ -7324,10 +7324,10 @@ "EnableLogFileValidation": "Specifies whether log file validation is enabled. The default is false.\n\n> When you disable log file integrity validation, the chain of digest files is broken after one hour. CloudTrail does not create digest files for log files that were delivered during a period in which log file integrity validation was disabled. For example, if you enable log file integrity validation at noon on January 1, disable it at noon on January 2, and re-enable it at noon on January 10, digest files will not be created for the log files delivered from noon on January 2 to noon on January 10. The same applies whenever you stop CloudTrail logging or delete a trail.", "EventSelectors": "Use event selectors to further specify the management and data event settings for your trail. By default, trails created without specific event selectors will be configured to log all read and write management events, and no data events. When an event occurs in your account, CloudTrail evaluates the event selector for all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event.\n\nYou can configure up to five event selectors for a trail.\n\nYou cannot apply both event selectors and advanced event selectors to a trail.", "IncludeGlobalServiceEvents": "Specifies whether the trail is publishing events from global services such as IAM to the log files.", - "InsightSelectors": "Specifies whether a trail has insight types specified in an `InsightSelector` list.", + "InsightSelectors": "", "IsLogging": "Whether the CloudTrail trail is currently logging AWS API calls.", "IsMultiRegionTrail": "Specifies whether the trail applies only to the current region or to all regions. The default is false. If the trail exists only in the current region and this value is set to true, shadow trails (replications of the trail) will be created in the other regions. If the trail exists in all regions and this value is set to false, the trail will remain in the region where it was created, and its shadow trails in other regions will be deleted. As a best practice, consider using trails that log events in all regions.", - "IsOrganizationTrail": "Specifies whether the trail is created for all accounts in an organization in AWS Organizations , or only for the current AWS account . The default is false, and cannot be true unless the call is made on behalf of an AWS account that is the management account for an organization in AWS Organizations .", + "IsOrganizationTrail": "Specifies whether the trail is applied to all accounts in an organization in AWS Organizations , or only for the current AWS account . 
The default is false, and cannot be true unless the call is made on behalf of an AWS account that is the management account for an organization in AWS Organizations . If the trail is not an organization trail and this is set to `true` , the trail will be created in all AWS accounts that belong to the organization. If the trail is an organization trail and this is set to `false` , the trail will remain in the current AWS account but be deleted from all member accounts in the organization.", "KMSKeyId": "Specifies the AWS KMS key ID to use to encrypt the logs delivered by CloudTrail. The value can be an alias name prefixed by \"alias/\", a fully specified ARN to an alias, a fully specified ARN to a key, or a globally unique identifier.\n\nCloudTrail also supports AWS KMS multi-Region keys. For more information about multi-Region keys, see [Using multi-Region keys](https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) in the *AWS Key Management Service Developer Guide* .\n\nExamples:\n\n- alias/MyAliasName\n- arn:aws:kms:us-east-2:123456789012:alias/MyAliasName\n- arn:aws:kms:us-east-2:123456789012:key/12345678-1234-1234-1234-123456789012\n- 12345678-1234-1234-1234-123456789012", "S3BucketName": "Specifies the name of the Amazon S3 bucket designated for publishing log files. See [Amazon S3 Bucket Naming Requirements](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/create_trail_naming_policy.html) .", "S3KeyPrefix": "Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see [Finding Your CloudTrail Log Files](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html) . The maximum length is 200 characters.", @@ -7338,17 +7338,17 @@ }, "AWS::CloudTrail::Trail.DataResource": { "attributes": {}, - "description": "The Amazon S3 buckets, AWS Lambda functions, or Amazon DynamoDB tables that you specify in your event selectors for your trail to log data events. Data events provide information about the resource operations performed on or within a resource itself. These are also known as data plane operations. You can specify up to 250 data resources for a trail.\n\n> The total number of allowed data resources is 250. This number can be distributed between 1 and 5 event selectors, but the total cannot exceed 250 across all selectors.\n> \n> If you are using advanced event selectors, the maximum total number of values for all conditions, across all advanced event selectors for the trail, is 500. \n\nThe following example demonstrates how logging works when you configure logging of all data events for an S3 bucket named `bucket-1` . In this example, the CloudTrail user specified an empty prefix, and the option to log both `Read` and `Write` data events.\n\n- A user uploads an image file to `bucket-1` .\n- The `PutObject` API operation is an Amazon S3 object-level API. It is recorded as a data event in CloudTrail. Because the CloudTrail user specified an S3 bucket with an empty prefix, events that occur on any object in that bucket are logged. The trail processes and logs the event.\n- A user uploads an object to an Amazon S3 bucket named `arn:aws:s3:::bucket-2` .\n- The `PutObject` API operation occurred for an object in an S3 bucket that the CloudTrail user didn't specify for the trail. 
The trail doesn\u2019t log the event.\n\nThe following example demonstrates how logging works when you configure logging of AWS Lambda data events for a Lambda function named *MyLambdaFunction* , but not for all Lambda functions.\n\n- A user runs a script that includes a call to the *MyLambdaFunction* function and the *MyOtherLambdaFunction* function.\n- The `Invoke` API operation on *MyLambdaFunction* is an Lambda API. It is recorded as a data event in CloudTrail. Because the CloudTrail user specified logging data events for *MyLambdaFunction* , any invocations of that function are logged. The trail processes and logs the event.\n- The `Invoke` API operation on *MyOtherLambdaFunction* is an Lambda API. Because the CloudTrail user did not specify logging data events for all Lambda functions, the `Invoke` operation for *MyOtherLambdaFunction* does not match the function specified for the trail. The trail doesn\u2019t log the event.", + "description": "The Amazon S3 buckets, AWS Lambda functions, or Amazon DynamoDB tables that you specify in event selectors in your AWS CloudFormation template for your trail to log data events. Data events provide information about the resource operations performed on or within a resource itself. These are also known as data plane operations. You can specify up to 250 data resources for a trail. Currently, advanced event selectors for data events are not supported in AWS CloudFormation templates.\n\n> The total number of allowed data resources is 250. This number can be distributed between 1 and 5 event selectors, but the total cannot exceed 250 across all selectors.\n> \n> If you are using advanced event selectors, the maximum total number of values for all conditions, across all advanced event selectors for the trail, is 500. \n\nThe following example demonstrates how logging works when you configure logging of all data events for an S3 bucket named `bucket-1` . In this example, the CloudTrail user specified an empty prefix, and the option to log both `Read` and `Write` data events.\n\n- A user uploads an image file to `bucket-1` .\n- The `PutObject` API operation is an Amazon S3 object-level API. It is recorded as a data event in CloudTrail. Because the CloudTrail user specified an S3 bucket with an empty prefix, events that occur on any object in that bucket are logged. The trail processes and logs the event.\n- A user uploads an object to an Amazon S3 bucket named `arn:aws:s3:::bucket-2` .\n- The `PutObject` API operation occurred for an object in an S3 bucket that the CloudTrail user didn't specify for the trail. The trail doesn\u2019t log the event.\n\nThe following example demonstrates how logging works when you configure logging of AWS Lambda data events for a Lambda function named *MyLambdaFunction* , but not for all Lambda functions.\n\n- A user runs a script that includes a call to the *MyLambdaFunction* function and the *MyOtherLambdaFunction* function.\n- The `Invoke` API operation on *MyLambdaFunction* is an Lambda API. It is recorded as a data event in CloudTrail. Because the CloudTrail user specified logging data events for *MyLambdaFunction* , any invocations of that function are logged. The trail processes and logs the event.\n- The `Invoke` API operation on *MyOtherLambdaFunction* is an Lambda API. Because the CloudTrail user did not specify logging data events for all Lambda functions, the `Invoke` operation for *MyOtherLambdaFunction* does not match the function specified for the trail. 
The trail doesn\u2019t log the event.", "properties": { - "Type": "The resource type in which you want to log data events. You can specify the following *basic* event selector resource types:\n\n- `AWS::S3::Object`\n- `AWS::Lambda::Function`\n- `AWS::DynamoDB::Table`\n\nThe following resource types are also availble through *advanced* event selectors. Basic event selector resource types are valid in advanced event selectors, but advanced event selector resource types are not valid in basic event selectors. For more information, see [AdvancedFieldSelector:Field](https://docs.aws.amazon.com/awscloudtrail/latest/APIReference/API_AdvancedFieldSelector.html#awscloudtrail-Type-AdvancedFieldSelector-Field) .\n\n- `AWS::S3Outposts::Object`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::EC2::Snapshot`\n- `AWS::S3::AccessPoint`\n- `AWS::DynamoDB::Stream`", - "Values": "An array of Amazon Resource Name (ARN) strings or partial ARN strings for the specified objects.\n\n- To log data events for all objects in all S3 buckets in your AWS account , specify the prefix as `arn:aws:s3:::` .\n\n> This also enables logging of data event activity performed by any user or role in your AWS account , even if that activity is performed on a bucket that belongs to another AWS account .\n- To log data events for all objects in an S3 bucket, specify the bucket and an empty object prefix such as `arn:aws:s3:::bucket-1/` . The trail logs data events for all objects in this S3 bucket.\n- To log data events for specific objects, specify the S3 bucket and object prefix such as `arn:aws:s3:::bucket-1/example-images` . The trail logs data events for objects in this S3 bucket that match the prefix.\n- To log data events for all Lambda functions in your AWS account , specify the prefix as `arn:aws:lambda` .\n\n> This also enables logging of `Invoke` activity performed by any user or role in your AWS account , even if that activity is performed on a function that belongs to another AWS account .\n- To log data events for a specific Lambda function, specify the function ARN.\n\n> Lambda function ARNs are exact. For example, if you specify a function ARN *arn:aws:lambda:us-west-2:111111111111:function:helloworld* , data events will only be logged for *arn:aws:lambda:us-west-2:111111111111:function:helloworld* . They will not be logged for *arn:aws:lambda:us-west-2:111111111111:function:helloworld2* .\n- To log data events for all DynamoDB tables in your AWS account , specify the prefix as `arn:aws:dynamodb` ." + "Type": "The resource type in which you want to log data events. You can specify the following *basic* event selector resource types:\n\n- `AWS::S3::Object`\n- `AWS::Lambda::Function`\n- `AWS::DynamoDB::Table`", + "Values": "An array of Amazon Resource Name (ARN) strings or partial ARN strings for the specified objects.\n\n- To log data events for all objects in all S3 buckets in your AWS account , specify the prefix as `arn:aws:s3` .\n\n> This also enables logging of data event activity performed by any user or role in your AWS account , even if that activity is performed on a bucket that belongs to another AWS account .\n- To log data events for all objects in an S3 bucket, specify the bucket and an empty object prefix such as `arn:aws:s3:::bucket-1/` . The trail logs data events for all objects in this S3 bucket.\n- To log data events for specific objects, specify the S3 bucket and object prefix such as `arn:aws:s3:::bucket-1/example-images` . 
The trail logs data events for objects in this S3 bucket that match the prefix.\n- To log data events for all Lambda functions in your AWS account , specify the prefix as `arn:aws:lambda` .\n\n> This also enables logging of `Invoke` activity performed by any user or role in your AWS account , even if that activity is performed on a function that belongs to another AWS account .\n- To log data events for a specific Lambda function, specify the function ARN.\n\n> Lambda function ARNs are exact. For example, if you specify a function ARN *arn:aws:lambda:us-west-2:111111111111:function:helloworld* , data events will only be logged for *arn:aws:lambda:us-west-2:111111111111:function:helloworld* . They will not be logged for *arn:aws:lambda:us-west-2:111111111111:function:helloworld2* .\n- To log data events for all DynamoDB tables in your AWS account , specify the prefix as `arn:aws:dynamodb` ." } }, "AWS::CloudTrail::Trail.EventSelector": { "attributes": {}, "description": "Use event selectors to further specify the management and data event settings for your trail. By default, trails created without specific event selectors will be configured to log all read and write management events, and no data events. When an event occurs in your account, CloudTrail evaluates the event selector for all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event.\n\nYou can configure up to five event selectors for a trail.\n\nYou cannot apply both event selectors and advanced event selectors to a trail.", "properties": { - "DataResources": "CloudTrail supports data event logging for Amazon S3 objects and AWS Lambda functions. You can specify up to 250 resources for an individual event selector, but the total number of data resources cannot exceed 250 across all event selectors in a trail. This limit does not apply if you configure resource logging for all data events.\n\nFor more information, see [Data Events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-management-and-data-events-with-cloudtrail.html#logging-data-events) and [Limits in AWS CloudTrail](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/WhatIsCloudTrail-Limits.html) in the *AWS CloudTrail User Guide* .", + "DataResources": "In AWS CloudFormation , CloudTrail supports data event logging for Amazon S3 objects, Amazon DynamoDB tables, and AWS Lambda functions. Currently, advanced event selectors for data events are not supported in AWS CloudFormation templates. You can specify up to 250 resources for an individual event selector, but the total number of data resources cannot exceed 250 across all event selectors in a trail. This limit does not apply if you configure resource logging for all data events.\n\nFor more information, see [Data Events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-management-and-data-events-with-cloudtrail.html#logging-data-events) and [Limits in AWS CloudTrail](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/WhatIsCloudTrail-Limits.html) in the *AWS CloudTrail User Guide* .", "ExcludeManagementEventSources": "An optional list of service event sources from which you do not want management events to be logged on your trail. In this release, the list can be empty (disables the filter), or it can filter out AWS Key Management Service or Amazon RDS Data API events by containing `kms.amazonaws.com` or `rdsdata.amazonaws.com` . 
By default, `ExcludeManagementEventSources` is empty, and AWS KMS and Amazon RDS Data API events are logged to your trail. You can exclude management event sources only in regions that support the event source.", "IncludeManagementEvents": "Specify if you want your event selector to include management events for your trail.\n\nFor more information, see [Management Events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-management-and-data-events-with-cloudtrail.html#logging-management-events) in the *AWS CloudTrail User Guide* .\n\nBy default, the value is `true` .\n\nThe first copy of management events is free. You are charged for additional copies of management events that you are logging on any subsequent trail in the same region. For more information about CloudTrail pricing, see [AWS CloudTrail Pricing](https://docs.aws.amazon.com/cloudtrail/pricing/) .", "ReadWriteType": "Specify if you want your trail to log read-only events, write-only events, or all. For example, the EC2 `GetConsoleOutput` is a read-only API operation and `RunInstances` is a write-only API operation.\n\nBy default, the value is `All` ." @@ -7454,7 +7454,7 @@ }, "AWS::CloudWatch::AnomalyDetector.Dimension": { "attributes": {}, - "description": "A dimension is a name/value pair that is part of the identity of a metric. Because dimensions are part of the unique identifier for a metric, whenever you add a unique name/value pair to one of your metrics, you are creating a new variation of that metric. For example, many Amazon EC2 metrics publish `InstanceId` as a dimension name, and the actual instance ID as the value for that dimension.\n\nYou can assign up to 10 dimensions to a metric.", + "description": "A dimension is a name/value pair that is part of the identity of a metric. Because dimensions are part of the unique identifier for a metric, whenever you add a unique name/value pair to one of your metrics, you are creating a new variation of that metric. For example, many Amazon EC2 metrics publish `InstanceId` as a dimension name, and the actual instance ID as the value for that dimension.\n\nYou can assign up to 30 dimensions to a metric.", "properties": { "Name": "The name of the dimension.", "Value": "The value of the dimension. Dimension values must contain only ASCII characters and must include at least one non-whitespace character." @@ -17585,9 +17585,9 @@ "attributes": {}, "description": "Specifies the configuration for experiment logging.", "properties": { - "CloudWatchLogsConfiguration": "The configuration for experiment logging to Amazon CloudWatch Logs. The supported field is `logGroupArn` . For example:\n\n`{\"logGroupArn\": \"aws:arn:logs: *region_name* : *account_id* :log-group: *log_group_name* \"}`", + "CloudWatchLogsConfiguration": "The configuration for experiment logging to Amazon CloudWatch Logs. The supported field is `LogGroupArn` . For example:\n\n`{\"LogGroupArn\": \"aws:arn:logs: *region_name* : *account_id* :log-group: *log_group_name* \"}`", "LogSchemaVersion": "The schema version. The supported value is 1.", - "S3Configuration": "The configuration for experiment logging to Amazon S3. The following fields are supported:\n\n- `bucketName` - The name of the destination bucket.\n- `prefix` - An optional bucket prefix.\n\nFor example:\n\n`{\"bucketName\": \" *my-s3-bucket* \", \"prefix\": \" *log-folder* \"}`" + "S3Configuration": "The configuration for experiment logging to Amazon S3. 
The following fields are supported:\n\n- `bucketName` - The name of the destination bucket.\n- `prefix` - An optional bucket prefix.\n\nFor example:\n\n`{\"BucketName\": \" *my-s3-bucket* \", \"Prefix\": \" *log-folder* \"}`" } }, "AWS::FIS::ExperimentTemplate.ExperimentTemplateStopCondition": { @@ -33710,7 +33710,7 @@ "EnableHttpEndpoint": "A value that indicates whether to enable the HTTP endpoint for an Aurora Serverless DB cluster. By default, the HTTP endpoint is disabled.\n\nWhen enabled, the HTTP endpoint provides a connectionless web service API for running SQL queries on the Aurora Serverless DB cluster. You can also query your database from inside the RDS console with the query editor.\n\nFor more information, see [Using the Data API for Aurora Serverless](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) in the *Amazon Aurora User Guide* .", "EnableIAMDatabaseAuthentication": "A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.\n\nFor more information, see [IAM Database Authentication](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html) in the *Amazon Aurora User Guide.*", "Engine": "The name of the database engine to be used for this DB cluster.\n\nValid Values: `aurora` (for MySQL 5.6-compatible Aurora), `aurora-mysql` (for MySQL 5.7-compatible Aurora), and `aurora-postgresql`", - "EngineMode": "The DB engine mode of the DB cluster, either `provisioned` , `serverless` , `parallelquery` , `global` , or `multimaster` .\n\nThe `parallelquery` engine mode isn't required for Aurora MySQL version 1.23 and higher 1.x versions, and version 2.09 and higher 2.x versions.\n\nThe `global` engine mode isn't required for Aurora MySQL version 1.22 and higher 1.x versions, and `global` engine mode isn't required for any 2.x versions.\n\nThe `multimaster` engine mode only applies for DB clusters created with Aurora MySQL version 5.6.10a.\n\nFor Aurora PostgreSQL, the `global` engine mode isn't required, and both the `parallelquery` and the `multimaster` engine modes currently aren't supported.\n\nLimitations and requirements apply to some DB engine modes. For more information, see the following sections in the *Amazon Aurora User Guide* :\n\n- [Limitations of Aurora Serverless](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations)\n- [Limitations of Parallel Query](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-mysql-parallel-query.html#aurora-mysql-parallel-query-limitations)\n- [Limitations of Aurora Global Databases](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html#aurora-global-database.limitations)\n- [Limitations of Multi-Master Clusters](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-multi-master.html#aurora-multi-master-limitations)", + "EngineMode": "The DB engine mode of the DB cluster, either `provisioned` , `serverless` , `parallelquery` , `global` , or `multimaster` .\n\nThe `serverless` engine mode only supports Aurora Serverless v1. 
Currently, AWS CloudFormation doesn't support Aurora Serverless v2.\n\nThe `parallelquery` engine mode isn't required for Aurora MySQL version 1.23 and higher 1.x versions, and version 2.09 and higher 2.x versions.\n\nThe `global` engine mode isn't required for Aurora MySQL version 1.22 and higher 1.x versions, and `global` engine mode isn't required for any 2.x versions.\n\nThe `multimaster` engine mode only applies for DB clusters created with Aurora MySQL version 5.6.10a.\n\nFor Aurora PostgreSQL, the `global` engine mode isn't required, and both the `parallelquery` and the `multimaster` engine modes currently aren't supported.\n\nLimitations and requirements apply to some DB engine modes. For more information, see the following sections in the *Amazon Aurora User Guide* :\n\n- [Limitations of Aurora Serverless](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations)\n- [Limitations of Parallel Query](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-mysql-parallel-query.html#aurora-mysql-parallel-query-limitations)\n- [Limitations of Aurora Global Databases](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html#aurora-global-database.limitations)\n- [Limitations of Multi-Master Clusters](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-multi-master.html#aurora-multi-master-limitations)", "EngineVersion": "The version number of the database engine to use.\n\nTo list all of the available engine versions for `aurora` (for MySQL 5.6-compatible Aurora), use the following command:\n\n`aws rds describe-db-engine-versions --engine aurora --query \"DBEngineVersions[].EngineVersion\"`\n\nTo list all of the available engine versions for `aurora-mysql` (for MySQL 5.7-compatible Aurora), use the following command:\n\n`aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"`\n\nTo list all of the available engine versions for `aurora-postgresql` , use the following command:\n\n`aws rds describe-db-engine-versions --engine aurora-postgresql --query \"DBEngineVersions[].EngineVersion\"`", "GlobalClusterIdentifier": "If you are configuring an Aurora global database cluster and want your Aurora DB cluster to be a secondary member in the global database cluster, specify the global cluster ID of the global database cluster. To define the primary database cluster of the global cluster, use the [AWS::RDS::GlobalCluster](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-globalcluster.html) resource.\n\nIf you aren't configuring a global database cluster, don't specify this property.\n\n> To remove the DB cluster from a global database cluster, specify an empty value for the `GlobalClusterIdentifier` property. \n\nFor information about Aurora global databases, see [Working with Amazon Aurora Global Databases](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html) in the *Amazon Aurora User Guide* .", "KmsKeyId": "The Amazon Resource Name (ARN) of the AWS KMS key that is used to encrypt the database instances in the DB cluster, such as `arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef` . If you enable the `StorageEncrypted` property but don't specify this property, the default KMS key is used. 
If you specify this property, you must set the `StorageEncrypted` property to `true` .\n\nIf you specify the `SnapshotIdentifier` property, the `StorageEncrypted` property value is inherited from the snapshot, and if the DB cluster is encrypted, the specified `KmsKeyId` property is used.", @@ -33721,7 +33721,7 @@ "PreferredMaintenanceWindow": "The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).\n\nFormat: `ddd:hh24:mi-ddd:hh24:mi`\n\nThe default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week. To see the time blocks available, see [Adjusting the Preferred DB Cluster Maintenance Window](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.Maintenance.html#AdjustingTheMaintenanceWindow.Aurora) in the *Amazon Aurora User Guide.*\n\nValid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.\n\nConstraints: Minimum 30-minute window.", "ReplicationSourceIdentifier": "The Amazon Resource Name (ARN) of the source DB instance or DB cluster if this DB cluster is created as a read replica.", "RestoreType": "The type of restore to be performed. You can specify one of the following values:\n\n- `full-copy` - The new DB cluster is restored as a full copy of the source DB cluster.\n- `copy-on-write` - The new DB cluster is restored as a clone of the source DB cluster.\n\nConstraints: You can't specify `copy-on-write` if the engine version of the source DB cluster is earlier than 1.11.\n\nIf you don't specify a `RestoreType` value, then the new DB cluster is restored as a full copy of the source DB cluster.", - "ScalingConfiguration": "The `ScalingConfiguration` property type specifies the scaling configuration of an Aurora Serverless DB cluster.", + "ScalingConfiguration": "The `ScalingConfiguration` property type specifies the scaling configuration of an Aurora Serverless DB cluster.\n\nCurrently, AWS CloudFormation only supports Aurora Serverless v1. AWS CloudFormation doesn't support Aurora Serverless v2.", "SnapshotIdentifier": "The identifier for the DB snapshot or DB cluster snapshot to restore from.\n\nYou can use either the name or the Amazon Resource Name (ARN) to specify a DB cluster snapshot. However, you can use only the ARN to specify a DB snapshot.\n\nAfter you restore a DB cluster with a `SnapshotIdentifier` property, you must specify the same `SnapshotIdentifier` property for any future updates to the DB cluster. When you specify this property for an update, the DB cluster is not restored from the snapshot again, and the data in the database is not changed. However, if you don't specify the `SnapshotIdentifier` property, an empty DB cluster is created, and the original DB cluster is deleted. 
If you specify a property that is different from the previous snapshot restore property, a new DB cluster is restored from the specified `SnapshotIdentifier` property, and the original DB cluster is deleted.\n\nIf you specify the `SnapshotIdentifier` property to restore a DB cluster (as opposed to specifying it for DB cluster updates), then don't specify the following properties:\n\n- `GlobalClusterIdentifier`\n- `MasterUsername`\n- `MasterUserPassword`\n- `ReplicationSourceIdentifier`\n- `RestoreType`\n- `SourceDBClusterIdentifier`\n- `SourceRegion`\n- `StorageEncrypted` (for an encrypted snapshot)\n- `UseLatestRestorableTime`\n\nConstraints:\n\n- Must match the identifier of an existing Snapshot.", "SourceDBClusterIdentifier": "When restoring a DB cluster to a point in time, the identifier of the source DB cluster from which to restore.\n\nConstraints:\n\n- Must match the identifier of an existing DBCluster.", "SourceRegion": "The AWS Region which contains the source DB cluster when replicating a DB cluster. For example, `us-east-1` .", @@ -33741,7 +33741,7 @@ }, "AWS::RDS::DBCluster.ScalingConfiguration": { "attributes": {}, - "description": "The `ScalingConfiguration` property type specifies the scaling configuration of an Aurora Serverless DB cluster.\n\nFor more information, see [Using Amazon Aurora Serverless](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html) in the *Amazon Aurora User Guide* .", + "description": "The `ScalingConfiguration` property type specifies the scaling configuration of an Aurora Serverless DB cluster.\n\nFor more information, see [Using Amazon Aurora Serverless](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html) in the *Amazon Aurora User Guide* .\n\nCurrently, AWS CloudFormation only supports Aurora Serverless v1. AWS CloudFormation doesn't support Aurora Serverless v2.", "properties": { "AutoPause": "A value that indicates whether to allow or disallow automatic pause for an Aurora DB cluster in `serverless` DB engine mode. A DB cluster can be paused only when it's idle (it has no connections).\n\n> If a DB cluster is paused for more than seven days, the DB cluster might be backed up with a snapshot. In this case, the DB cluster is restored when there is a request to connect to it.", "MaxCapacity": "The maximum capacity for an Aurora DB cluster in `serverless` DB engine mode.\n\nFor Aurora MySQL, valid capacity values are `1` , `2` , `4` , `8` , `16` , `32` , `64` , `128` , and `256` .\n\nFor Aurora PostgreSQL, valid capacity values are `2` , `4` , `8` , `16` , `32` , `64` , `192` , and `384` .\n\nThe maximum capacity must be greater than or equal to the minimum capacity.",
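The CloudTrail descriptions above cover `EventSelectors`, `DataResources`, and the ARN forms accepted in `Values`. Below is a minimal YAML sketch of how those properties might be combined on an `AWS::CloudTrail::Trail` resource; the logical ID, the delivery bucket name, and the target bucket `bucket-1` (borrowed from the example text) are placeholders, and supporting pieces such as the bucket policy that CloudTrail delivery requires are omitted.

```yaml
Resources:
  ExampleTrail:
    Type: AWS::CloudTrail::Trail
    Properties:
      IsLogging: true
      S3BucketName: example-trail-delivery-bucket   # placeholder; required bucket policy not shown
      EventSelectors:
        - ReadWriteType: All              # log both Read and Write data events
          IncludeManagementEvents: true
          DataResources:
            - Type: AWS::S3::Object
              Values:
                # Bucket ARN with an empty object prefix: data events are logged
                # for every object in bucket-1, as described above.
                - arn:aws:s3:::bucket-1/
```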
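The `AWS::CloudWatch::AnomalyDetector.Dimension` entry above describes a dimension as a name/value pair, such as `InstanceId` plus an actual instance ID. The sketch below shows that dimension on a single-metric anomaly detector; the namespace, metric, statistic, and instance ID are illustrative values, not part of the documentation being updated.

```yaml
CpuAnomalyDetector:
  Type: AWS::CloudWatch::AnomalyDetector
  Properties:
    Namespace: AWS/EC2
    MetricName: CPUUtilization
    Stat: Average
    Dimensions:
      - Name: InstanceId              # dimension name from the example in the description
        Value: i-0123456789abcdef0    # hypothetical instance ID
```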
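For the FIS experiment-logging strings above, the fragment below sketches the documented fields using the Pascal-case names shown in the updated examples (`LogGroupArn`, `BucketName`, `Prefix`). The log group ARN, bucket name, and prefix are hypothetical, and the rest of the experiment template (role, actions, targets, stop conditions) is omitted.

```yaml
# Fragment of an AWS::FIS::ExperimentTemplate "Properties" section; all other
# required experiment-template properties are omitted from this sketch.
LogConfiguration:
  LogSchemaVersion: 1                 # the only supported schema version, per the description
  CloudWatchLogsConfiguration:
    LogGroupArn: arn:aws:logs:us-east-1:111122223333:log-group:example-fis-logs   # hypothetical ARN
  S3Configuration:
    BucketName: example-fis-log-bucket   # hypothetical destination bucket
    Prefix: log-folder                   # optional prefix, taken from the documented example
```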
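Finally, the `EngineMode` and `ScalingConfiguration` descriptions above note that CloudFormation currently supports Aurora Serverless v1 only. A minimal sketch of a `serverless`-mode cluster follows; the engine choice, the credentials (shown as a Secrets Manager dynamic reference to a hypothetical secret), and the capacity values are assumptions for illustration rather than a recommended configuration.

```yaml
ExampleServerlessCluster:
  Type: AWS::RDS::DBCluster
  Properties:
    Engine: aurora                    # MySQL 5.6-compatible Aurora, as listed above
    EngineMode: serverless            # Aurora Serverless v1; v2 is not supported here
    MasterUsername: admin             # illustrative only
    MasterUserPassword: '{{resolve:secretsmanager:ExampleClusterSecret:SecretString:password}}'  # hypothetical secret
    ScalingConfiguration:
      AutoPause: true                 # pause the cluster when it has been idle
      MinCapacity: 1                  # Aurora MySQL capacity values include 1, 2, 4, 8, ...
      MaxCapacity: 8                  # must be greater than or equal to MinCapacity
```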