diff --git a/CHANGELOG.md b/CHANGELOG.md index 0d32079530e..6bbeacd77b9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,19 @@ +Release v1.44.239 (2023-04-07) +=== + +### Service Client Updates +* `service/dlm`: Adds new service +* `service/docdb`: Updates service API and documentation + * This release adds a new parameter 'DBClusterParameterGroupName' to 'RestoreDBClusterFromSnapshot' API to associate the name of the DB cluster parameter group while performing restore. +* `service/fsx`: Updates service documentation +* `service/lambda`: Updates service API and documentation + * This release adds a new Lambda InvokeWithResponseStream API to support streaming Lambda function responses. The release also adds a new InvokeMode parameter to Function Url APIs to control whether the response will be streamed or buffered. +* `service/quicksight`: Updates service API and documentation + * This release has two changes: adding the OR condition to tag-based RLS rules in CreateDataSet and UpdateDataSet; adding RefreshSchedule and Incremental RefreshProperties operations for users to programmatically configure SPICE dataset ingestions. +* `service/redshift-data`: Updates service documentation +* `service/servicecatalog`: Updates service documentation + * Updates description for property + Release v1.44.238 (2023-04-06) === diff --git a/aws/version.go b/aws/version.go index ba51da4856f..216c6d49a15 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.238" +const SDKVersion = "1.44.239" diff --git a/models/apis/dlm/2018-01-12/endpoint-rule-set-1.json b/models/apis/dlm/2018-01-12/endpoint-rule-set-1.json index efc50d4c587..7069e7a3bbb 100644 --- a/models/apis/dlm/2018-01-12/endpoint-rule-set-1.json +++ b/models/apis/dlm/2018-01-12/endpoint-rule-set-1.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": 
[ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -131,179 +111,240 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, + }, { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsDualStack" + true ] } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], + ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://dlm-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, { "conditions": [], - "endpoint": { - "url": "https://dlm-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true ] } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "stringEquals", + "fn": "booleanEquals", "argv": [ - "aws-us-gov", + true, { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "name" + "supportsFIPS" ] } ] } ], - "endpoint": { - "url": "https://dlm.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws-us-gov", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://dlm.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://dlm-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] }, { "conditions": [], - "endpoint": { - "url": "https://dlm-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - 
] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsDualStack" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://dlm.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -311,7 +352,7 @@ { "conditions": [], "endpoint": { - "url": "https://dlm.{Region}.{PartitionResult#dualStackDnsSuffix}", + "url": "https://dlm.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -320,28 +361,13 @@ ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://dlm.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/models/apis/dlm/2018-01-12/endpoint-tests-1.json b/models/apis/dlm/2018-01-12/endpoint-tests-1.json index e0a57f37a78..53f0e7caa24 100644 --- a/models/apis/dlm/2018-01-12/endpoint-tests-1.json +++ b/models/apis/dlm/2018-01-12/endpoint-tests-1.json @@ -1,159 +1,159 @@ { "testCases": [ { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://dlm.af-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "us-iso-east-1" + "Region": "af-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.us-iso-east-1.c2s.ic.gov" + "url": "https://dlm.ap-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ap-east-1", "UseFIPS": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.ap-northeast-3.amazonaws.com" + "url": "https://dlm.ap-northeast-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ap-northeast-1", "UseFIPS": false, - "Region": "ap-northeast-3" + "UseDualStack": 
false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.us-east-1.amazonaws.com" + "url": "https://dlm.ap-northeast-2.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ap-northeast-2", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": false } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.eu-west-1.amazonaws.com" + "url": "https://dlm.ap-northeast-3.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ap-northeast-3", "UseFIPS": false, - "Region": "eu-west-1" + "UseDualStack": false } }, { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.eu-west-2.amazonaws.com" + "url": "https://dlm.ap-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ap-south-1", "UseFIPS": false, - "Region": "eu-west-2" + "UseDualStack": false } }, { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.eu-west-3.amazonaws.com" + "url": "https://dlm.ap-southeast-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ap-southeast-1", "UseFIPS": false, - "Region": "eu-west-3" + "UseDualStack": false } }, { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.me-south-1.amazonaws.com" + "url": "https://dlm.ap-southeast-2.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ap-southeast-2", "UseFIPS": false, - "Region": "me-south-1" + "UseDualStack": false } }, { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.eu-north-1.amazonaws.com" + "url": "https://dlm.ap-southeast-3.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ap-southeast-3", "UseFIPS": false, - "Region": "eu-north-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.us-east-2.amazonaws.com" + "url": "https://dlm.ca-central-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ca-central-1", "UseFIPS": false, - "Region": "us-east-2" + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.sa-east-1.amazonaws.com" + "url": "https://dlm.eu-central-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "eu-central-1", "UseFIPS": false, - "Region": "sa-east-1" + "UseDualStack": false } }, { 
- "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.ap-east-1.amazonaws.com" + "url": "https://dlm.eu-north-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "eu-north-1", "UseFIPS": false, - "Region": "ap-east-1" + "UseDualStack": false } }, { @@ -164,74 +164,100 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-south-1", "UseFIPS": false, - "Region": "eu-south-1" + "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.eu-central-1.amazonaws.com" + "url": "https://dlm.eu-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "eu-west-1", "UseFIPS": false, - "Region": "eu-central-1" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.ap-southeast-1.amazonaws.com" + "url": "https://dlm.eu-west-2.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "eu-west-2", "UseFIPS": false, - "Region": "ap-southeast-1" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.ap-southeast-2.amazonaws.com" + "url": "https://dlm.eu-west-3.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "eu-west-3", "UseFIPS": false, - "Region": "ap-southeast-2" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.ap-southeast-3.amazonaws.com" + "url": "https://dlm.me-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "me-south-1", "UseFIPS": false, - "Region": "ap-southeast-3" + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.ca-central-1.amazonaws.com" + "url": "https://dlm.sa-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "sa-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://dlm.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://dlm.us-east-2.amazonaws.com" + } + }, + "params": { + "Region": "us-east-2", "UseFIPS": false, - "Region": "ca-central-1" + "UseDualStack": false } }, { @@ -242,9 +268,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-1", "UseFIPS": false, - "Region": "us-west-1" + "UseDualStack": false } }, { @@ -255,152 +281,165 @@ } }, "params": { - 
"UseDualStack": false, + "Region": "us-west-2", "UseFIPS": false, - "Region": "us-west-2" + "UseDualStack": false } }, { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://dlm.af-south-1.amazonaws.com" + "url": "https://dlm-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "af-south-1" + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.ap-south-1.amazonaws.com" + "url": "https://dlm-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://dlm.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", "UseFIPS": false, - "Region": "ap-south-1" + "UseDualStack": true } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.ap-northeast-1.amazonaws.com" + "url": "https://dlm.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "ap-northeast-1" + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.ap-northeast-2.amazonaws.com" + "url": "https://dlm.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, + "Region": "cn-northwest-1", "UseFIPS": false, - "Region": "ap-northeast-2" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://dlm-fips.us-east-1.api.aws" + "url": "https://dlm-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm-fips.us-east-1.amazonaws.com" + "url": "https://dlm-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://dlm.us-east-1.api.aws" + "url": "https://dlm.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-gov-west-1 
with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.us-gov-west-1.amazonaws.com" + "url": "https://dlm.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-west-1" + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.us-gov-west-1.amazonaws.com" + "url": "https://dlm.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-west-1" + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.us-gov-east-1.amazonaws.com" + "url": "https://dlm.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-gov-west-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.us-gov-east-1.amazonaws.com" + "url": "https://dlm.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-gov-west-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -411,9 +450,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -424,113 +463,131 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://dlm-fips.us-isob-east-1.sc2s.sgov.gov" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.us-isob-east-1.sc2s.sgov.gov" + "url": "https://dlm-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "us-isob-east-1" + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://dlm.cn-northwest-1.amazonaws.com.cn" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "cn-northwest-1" + 
"UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.cn-north-1.amazonaws.com.cn" + "url": "https://dlm.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://dlm-fips.cn-north-1.api.amazonwebservices.com.cn" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm-fips.cn-north-1.amazonaws.com.cn" + "url": "https://dlm-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://dlm.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://dlm.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { + "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { "UseFIPS": false, - "Region": "us-east-1", + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -540,9 +597,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -552,11 +609,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git 
a/models/apis/docdb/2014-10-31/api-2.json b/models/apis/docdb/2014-10-31/api-2.json index 6b7ddaa68bd..7edf9e0180c 100644 --- a/models/apis/docdb/2014-10-31/api-2.json +++ b/models/apis/docdb/2014-10-31/api-2.json @@ -2846,7 +2846,8 @@ "Tags":{"shape":"TagList"}, "KmsKeyId":{"shape":"String"}, "EnableCloudwatchLogsExports":{"shape":"LogTypeList"}, - "DeletionProtection":{"shape":"BooleanOptional"} + "DeletionProtection":{"shape":"BooleanOptional"}, + "DBClusterParameterGroupName":{"shape":"String"} } }, "RestoreDBClusterFromSnapshotResult":{ diff --git a/models/apis/docdb/2014-10-31/docs-2.json b/models/apis/docdb/2014-10-31/docs-2.json index 14581e865af..66a42d9a046 100644 --- a/models/apis/docdb/2014-10-31/docs-2.json +++ b/models/apis/docdb/2014-10-31/docs-2.json @@ -1571,7 +1571,7 @@ "FailoverDBClusterMessage$TargetDBInstanceIdentifier": "
The name of the instance to promote to the primary instance.
You must specify the instance identifier for an Amazon DocumentDB replica in the cluster. For example, mydbcluster-replica1.
The name of the filter. Filter names are case sensitive.
", "FilterValueList$member": null, - "GlobalCluster$GlobalClusterResourceId": "The Amazon Web Services Region-unique, immutable identifier for the global database cluster. This identifier is found in AWS CloudTrail log entries whenever the AWS KMS customer master key (CMK) for the cluster is accessed.
", + "GlobalCluster$GlobalClusterResourceId": "The Amazon Web Services Region-unique, immutable identifier for the global database cluster. This identifier is found in CloudTrail log entries whenever the KMS customer master key (CMK) for the cluster is accessed.
", "GlobalCluster$GlobalClusterArn": "The Amazon Resource Name (ARN) for the global cluster.
", "GlobalCluster$Status": "Specifies the current state of this global cluster.
", "GlobalCluster$Engine": "The Amazon DocumentDB database engine used by the global cluster.
", @@ -1643,8 +1643,9 @@ "RestoreDBClusterFromSnapshotMessage$EngineVersion": "The version of the database engine to use for the new cluster.
", "RestoreDBClusterFromSnapshotMessage$DBSubnetGroupName": "The name of the subnet group to use for the new cluster.
Constraints: If provided, must match the name of an existing DBSubnetGroup.
Example: mySubnetgroup
The KMS key identifier to use when restoring an encrypted cluster from a DB snapshot or cluster snapshot.
The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are restoring a cluster with the same Amazon Web Services account that owns the KMS encryption key used to encrypt the new cluster, then you can use the KMS key alias instead of the ARN for the KMS encryption key.
If you do not specify a value for the KmsKeyId
parameter, then the following occurs:
If the snapshot or cluster snapshot in SnapshotIdentifier
is encrypted, then the restored cluster is encrypted using the KMS key that was used to encrypt the snapshot or the cluster snapshot.
If the snapshot or the cluster snapshot in SnapshotIdentifier
is not encrypted, then the restored DB cluster is not encrypted.
The name of the DB cluster parameter group to associate with this DB cluster.
Type: String. Required: No.
If this argument is omitted, the default DB cluster parameter group is used. If supplied, must match the name of an existing default DB cluster parameter group. The string must consist of from 1 to 255 letters, numbers or hyphens. Its first character must be a letter, and it cannot end with a hyphen or contain two consecutive hyphens.
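As a rough, non-authoritative sketch of how the new DBClusterParameterGroupName parameter might be passed through the aws-sdk-go v1 docdb client (the cluster, snapshot, and parameter group names below are placeholders):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/docdb"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	client := docdb.New(sess)

	// Restore from a snapshot and associate a DB cluster parameter group in the same call.
	out, err := client.RestoreDBClusterFromSnapshot(&docdb.RestoreDBClusterFromSnapshotInput{
		DBClusterIdentifier:         aws.String("my-restored-cluster"),    // placeholder
		SnapshotIdentifier:          aws.String("my-cluster-snapshot"),    // placeholder
		Engine:                      aws.String("docdb"),
		DBClusterParameterGroupName: aws.String("my-cluster-param-group"), // placeholder; new in this release
	})
	if err != nil {
		fmt.Println("restore failed:", err)
		return
	}
	fmt.Println("restoring:", aws.StringValue(out.DBCluster.DBClusterIdentifier))
}

If the parameter is omitted, the default DB cluster parameter group is associated, as described above.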
", "RestoreDBClusterToPointInTimeMessage$DBClusterIdentifier": "The name of the new cluster to be created.
Constraints:
Must contain from 1 to 63 letters, numbers, or hyphens.
The first character must be a letter.
Cannot end with a hyphen or contain two consecutive hyphens.
The type of restore to be performed. You can specify one of the following values:
full-copy
- The new DB cluster is restored as a full copy of the source DB cluster.
copy-on-write
- The new DB cluster is restored as a clone of the source DB cluster.
If you don't specify a RestoreType
value, then the new DB cluster is restored as a full copy of the source DB cluster.
The type of restore to be performed. You can specify one of the following values:
full-copy
- The new DB cluster is restored as a full copy of the source DB cluster.
copy-on-write
- The new DB cluster is restored as a clone of the source DB cluster.
Constraints: You can't specify copy-on-write
if the engine version of the source DB cluster is earlier than 1.11.
If you don't specify a RestoreType
value, then the new DB cluster is restored as a full copy of the source DB cluster.
The identifier of the source cluster from which to restore.
Constraints:
Must match the identifier of an existing DBCluster.
The subnet group name to use for the new cluster.
Constraints: If provided, must match the name of an existing DBSubnetGroup.
Example: mySubnetgroup
The KMS key identifier to use when restoring an encrypted cluster from an encrypted cluster.
The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are restoring a cluster with the same Amazon Web Services account that owns the KMS encryption key used to encrypt the new cluster, then you can use the KMS key alias instead of the ARN for the KMS encryption key.
You can restore to a new cluster and encrypt the new cluster with an KMS key that is different from the KMS key used to encrypt the source cluster. The new DB cluster is encrypted with the KMS key identified by the KmsKeyId
parameter.
If you do not specify a value for the KmsKeyId
parameter, then the following occurs:
If the cluster is encrypted, then the restored cluster is encrypted using the KMS key that was used to encrypt the source cluster.
If the cluster is not encrypted, then the restored cluster is not encrypted.
If DBClusterIdentifier
refers to a cluster that is not encrypted, then the restore request is rejected.
Cancels an existing Amazon FSx for Lustre data repository task if that task is in either the PENDING
or EXECUTING
state. When you cancel a task, Amazon FSx does the following.
Any files that FSx has already exported are not reverted.
FSx continues to export any files that are \"in-flight\" when the cancel operation is received.
FSx does not export any files that have not yet been exported.
Copies an existing backup within the same Amazon Web Services account to another Amazon Web Services Region (cross-Region copy) or within the same Amazon Web Services Region (in-Region copy). You can have up to five backup copy requests in progress to a single destination Region per account.
You can use cross-Region backup copies for cross-Region disaster recovery. You can periodically take backups and copy them to another Region so that in the event of a disaster in the primary Region, you can restore from backup and recover availability quickly in the other Region. You can make cross-Region copies only within your Amazon Web Services partition. A partition is a grouping of Regions. Amazon Web Services currently has three partitions: aws
(Standard Regions), aws-cn
(China Regions), and aws-us-gov
(Amazon Web Services GovCloud [US] Regions).
You can also use backup copies to clone your file dataset to another Region or within the same Region.
You can use the SourceRegion
parameter to specify the Amazon Web Services Region from which the backup will be copied. For example, if you make the call from the us-west-1
Region and want to copy a backup from the us-east-2
Region, you specify us-east-2
in the SourceRegion
parameter to make a cross-Region copy. If you don't specify a Region, the backup copy is created in the same Region where the request is sent from (in-Region copy).
For more information about creating backup copies, see Copying backups in the Amazon FSx for Windows User Guide, Copying backups in the Amazon FSx for Lustre User Guide, and Copying backups in the Amazon FSx for OpenZFS User Guide.
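To make the SourceRegion behavior above concrete, a minimal sketch with the aws-sdk-go v1 fsx client follows; the backup ID is a placeholder, and the client is deliberately created in the destination Region:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/fsx"
)

func main() {
	// The request is sent to the destination Region (us-west-1 here);
	// SourceRegion selects where the backup is copied from.
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-1")}))
	client := fsx.New(sess)

	out, err := client.CopyBackup(&fsx.CopyBackupInput{
		SourceBackupId: aws.String("backup-0123456789abcdef0"), // placeholder
		SourceRegion:   aws.String("us-east-2"),                // omit for an in-Region copy
	})
	if err != nil {
		fmt.Println("copy failed:", err)
		return
	}
	fmt.Println("copy started:", aws.StringValue(out.Backup.BackupId))
}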
", "CreateBackup": "Creates a backup of an existing Amazon FSx for Windows File Server file system, Amazon FSx for Lustre file system, Amazon FSx for NetApp ONTAP volume, or Amazon FSx for OpenZFS file system. We recommend creating regular backups so that you can restore a file system or volume from a backup if an issue arises with the original file system or volume.
For Amazon FSx for Lustre file systems, you can create a backup only for file systems that have the following configuration:
A Persistent deployment type
Are not linked to a data repository
For more information about backups, see the following:
For Amazon FSx for Lustre, see Working with FSx for Lustre backups.
For Amazon FSx for Windows, see Working with FSx for Windows backups.
For Amazon FSx for NetApp ONTAP, see Working with FSx for NetApp ONTAP backups.
For Amazon FSx for OpenZFS, see Working with FSx for OpenZFS backups.
If a backup with the specified client request token exists and the parameters match, this operation returns the description of the existing backup. If a backup with the specified client request token exists and the parameters don't match, this operation returns IncompatibleParameterError
. If a backup with the specified client request token doesn't exist, CreateBackup
does the following:
Creates a new Amazon FSx backup with an assigned ID, and an initial lifecycle state of CREATING.
Returns the description of the backup.
By using the idempotent operation, you can retry a CreateBackup
operation without the risk of creating an extra backup. This approach can be useful when an initial call fails in a way that makes it unclear whether a backup was created. If you use the same client request token and the initial call created a backup, the operation returns a successful result because all the parameters are the same.
The CreateBackup
operation returns while the backup's lifecycle state is still CREATING
. You can check the backup creation status by calling the DescribeBackups operation, which returns the backup state along with other information.
Creates an Amazon FSx for Lustre data repository association (DRA). A data repository association is a link between a directory on the file system and an Amazon S3 bucket or prefix. You can have a maximum of 8 data repository associations on a file system. Data repository associations are supported only for file systems with the Persistent_2
deployment type.
Each data repository association must have a unique Amazon FSx file system directory and a unique S3 bucket or prefix associated with it. You can configure a data repository association for automatic import only, for automatic export only, or for both. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket.
CreateDataRepositoryAssociation
isn't supported on Amazon File Cache resources. To create a DRA on Amazon File Cache, use the CreateFileCache
operation.
Creates an Amazon FSx for Lustre data repository association (DRA). A data repository association is a link between a directory on the file system and an Amazon S3 bucket or prefix. You can have a maximum of 8 data repository associations on a file system. Data repository associations are supported for all file systems except for Scratch_1
deployment type.
Each data repository association must have a unique Amazon FSx file system directory and a unique S3 bucket or prefix associated with it. You can configure a data repository association for automatic import only, for automatic export only, or for both. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket.
CreateDataRepositoryAssociation
isn't supported on Amazon File Cache resources. To create a DRA on Amazon File Cache, use the CreateFileCache
operation.
Creates an Amazon FSx for Lustre data repository task. You use data repository tasks to perform bulk operations between your Amazon FSx file system and its linked data repositories. An example of a data repository task is exporting any data and metadata changes, including POSIX metadata, to files, directories, and symbolic links (symlinks) from your FSx file system to a linked data repository. A CreateDataRepositoryTask
operation will fail if a data repository is not linked to the FSx file system. To learn more about data repository tasks, see Data Repository Tasks. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket.
Creates a new Amazon File Cache resource.
You can use this operation with a client request token in the request that Amazon File Cache uses to ensure idempotent creation. If a cache with the specified client request token exists and the parameters match, CreateFileCache
returns the description of the existing cache. If a cache with the specified client request token exists and the parameters don't match, this call returns IncompatibleParameterError
. If a file cache with the specified client request token doesn't exist, CreateFileCache
does the following:
Creates a new, empty Amazon File Cache resource with an assigned ID, and an initial lifecycle state of CREATING.
Returns the description of the cache in JSON format.
The CreateFileCache
call returns while the cache's lifecycle state is still CREATING
. You can check the cache creation status by calling the DescribeFileCaches operation, which returns the cache state along with other information.
Creates a new, empty Amazon FSx file system. You can create the following supported Amazon FSx file systems using the CreateFileSystem
API operation:
Amazon FSx for Lustre
Amazon FSx for NetApp ONTAP
Amazon FSx for OpenZFS
Amazon FSx for Windows File Server
This operation requires a client request token in the request that Amazon FSx uses to ensure idempotent creation. This means that calling the operation multiple times with the same client request token has no effect. By using the idempotent operation, you can retry a CreateFileSystem
operation without the risk of creating an extra file system. This approach can be useful when an initial call fails in a way that makes it unclear whether a file system was created. Examples are if a transport level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a file system, the client receives success as long as the parameters are the same.
If a file system with the specified client request token exists and the parameters match, CreateFileSystem
returns the description of the existing file system. If a file system with the specified client request token exists and the parameters don't match, this call returns IncompatibleParameterError
. If a file system with the specified client request token doesn't exist, CreateFileSystem
does the following:
Creates a new, empty Amazon FSx file system with an assigned ID, and an initial lifecycle state of CREATING.
Returns the description of the file system in JSON format.
The CreateFileSystem
call returns while the file system's lifecycle state is still CREATING
. You can check the file-system creation status by calling the DescribeFileSystems operation, which returns the file system state along with other information.
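A hedged illustration of the client-request-token idempotency described above, using the aws-sdk-go v1 fsx client; the token and subnet ID are placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/fsx"
)

func main() {
	client := fsx.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	input := &fsx.CreateFileSystemInput{
		ClientRequestToken: aws.String("2f1e9c5a-0000-example-token"),         // placeholder; reuse on retry
		FileSystemType:     aws.String("LUSTRE"),
		StorageCapacity:    aws.Int64(1200),
		SubnetIds:          []*string{aws.String("subnet-0123456789abcdef0")}, // placeholder
	}

	// Retrying with the identical token and parameters returns the existing
	// file system instead of creating a second one.
	out, err := client.CreateFileSystem(input)
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}
	fmt.Println("file system:", aws.StringValue(out.FileSystem.FileSystemId),
		"state:", aws.StringValue(out.FileSystem.Lifecycle))
}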
Creates an FSx for ONTAP or Amazon FSx for OpenZFS storage volume.
", "CreateVolumeFromBackup": "Creates a new Amazon FSx for NetApp ONTAP volume from an existing Amazon FSx volume backup.
", "DeleteBackup": "Deletes an Amazon FSx backup. After deletion, the backup no longer exists, and its data is gone.
The DeleteBackup
call returns instantly. The backup won't show up in later DescribeBackups
calls.
The data in a deleted backup is also deleted and can't be recovered by any means.
Deletes a data repository association on an Amazon FSx for Lustre file system. Deleting the data repository association unlinks the file system from the Amazon S3 bucket. When deleting a data repository association, you have the option of deleting the data in the file system that corresponds to the data repository association. Data repository associations are supported only for file systems with the Persistent_2
deployment type.
Deletes a data repository association on an Amazon FSx for Lustre file system. Deleting the data repository association unlinks the file system from the Amazon S3 bucket. When deleting a data repository association, you have the option of deleting the data in the file system that corresponds to the data repository association. Data repository associations are supported for all file systems except for Scratch_1
deployment type.
Deletes an Amazon File Cache resource. After deletion, the cache no longer exists, and its data is gone.
The DeleteFileCache
operation returns while the cache has the DELETING
status. You can check the cache deletion status by calling the DescribeFileCaches operation, which returns a list of caches in your account. If you pass the cache ID for a deleted cache, the DescribeFileCaches
operation returns a FileCacheNotFound
error.
The data in a deleted cache is also deleted and can't be recovered by any means.
Deletes a file system. After deletion, the file system no longer exists, and its data is gone. Any existing automatic backups and snapshots are also deleted.
To delete an Amazon FSx for NetApp ONTAP file system, first delete all the volumes and storage virtual machines (SVMs) on the file system. Then provide a FileSystemId
value to the DeleteFileSystem
operation.
By default, when you delete an Amazon FSx for Windows File Server file system, a final backup is created upon deletion. This final backup isn't subject to the file system's retention policy, and must be manually deleted.
The DeleteFileSystem
operation returns while the file system has the DELETING
status. You can check the file system deletion status by calling the DescribeFileSystems operation, which returns a list of file systems in your account. If you pass the file system ID for a deleted file system, the DescribeFileSystems
operation returns a FileSystemNotFound
error.
If a data repository task is in a PENDING
or EXECUTING
state, deleting an Amazon FSx for Lustre file system will fail with an HTTP status code 400 (Bad Request).
The data in a deleted file system is also deleted and can't be recovered by any means.
Deletes an Amazon FSx for OpenZFS snapshot. After deletion, the snapshot no longer exists, and its data is gone. Deleting a snapshot doesn't affect snapshots stored in a file system backup.
The DeleteSnapshot
operation returns instantly. The snapshot appears with the lifecycle status of DELETING
until the deletion is complete.
Deletes an existing Amazon FSx for ONTAP storage virtual machine (SVM). Prior to deleting an SVM, you must delete all non-root volumes in the SVM, otherwise the operation will fail.
", "DeleteVolume": "Deletes an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS volume.
", "DescribeBackups": "Returns the description of a specific Amazon FSx backup, if a BackupIds
value is provided for that backup. Otherwise, it returns all backups owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling.
When retrieving all backups, you can optionally specify the MaxResults
parameter to limit the number of backups in a response. If more backups remain, Amazon FSx returns a NextToken
value in the response. In this case, send a later request with the NextToken
request parameter set to the value of the NextToken
value from the last response.
This operation is used in an iterative process to retrieve a list of your backups. DescribeBackups
is called first without a NextToken
value. Then the operation continues to be called with the NextToken
parameter set to the value of the last NextToken
value until a response has no NextToken
value.
When using this operation, keep the following in mind:
The operation might return fewer than the MaxResults
value of backup descriptions while still including a NextToken
value.
The order of the backups returned in the response of one DescribeBackups
call and the order of the backups returned across the responses of a multi-call iteration is unspecified.
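The MaxResults/NextToken loop described above maps onto the SDK's paginator helper; a rough sketch with the aws-sdk-go v1 fsx client:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/fsx"
)

func main() {
	client := fsx.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	// DescribeBackupsPages re-issues the request with the returned NextToken
	// until no further token is present.
	err := client.DescribeBackupsPages(&fsx.DescribeBackupsInput{
		MaxResults: aws.Int64(25),
	}, func(page *fsx.DescribeBackupsOutput, lastPage bool) bool {
		for _, b := range page.Backups {
			fmt.Println(aws.StringValue(b.BackupId), aws.StringValue(b.Lifecycle))
		}
		return true // keep paging
	})
	if err != nil {
		fmt.Println("describe failed:", err)
	}
}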
Returns the description of specific Amazon FSx for Lustre or Amazon File Cache data repository associations, if one or more AssociationIds
values are provided in the request, or if filters are used in the request. Data repository associations are supported only for Amazon FSx for Lustre file systems with the Persistent_2
deployment type and for Amazon File Cache resources.
You can use filters to narrow the response to include just data repository associations for specific file systems (use the file-system-id
filter with the ID of the file system) or caches (use the file-cache-id
filter with the ID of the cache), or data repository associations for a specific repository type (use the data-repository-type
filter with a value of S3
or NFS
). If you don't use filters, the response returns all data repository associations owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling.
When retrieving all data repository associations, you can paginate the response by using the optional MaxResults
parameter to limit the number of data repository associations returned in a response. If more data repository associations remain, a NextToken
value is returned in the response. In this case, send a later request with the NextToken
request parameter set to the value of NextToken
from the last response.
Returns the description of specific Amazon FSx for Lustre or Amazon File Cache data repository associations, if one or more AssociationIds
values are provided in the request, or if filters are used in the request. Data repository associations are supported on Amazon File Cache resources and all Amazon FSx for Lustre file systems excluding Scratch_1
deployment types.
You can use filters to narrow the response to include just data repository associations for specific file systems (use the file-system-id
filter with the ID of the file system) or caches (use the file-cache-id
filter with the ID of the cache), or data repository associations for a specific repository type (use the data-repository-type
filter with a value of S3
or NFS
). If you don't use filters, the response returns all data repository associations owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling.
When retrieving all data repository associations, you can paginate the response by using the optional MaxResults
parameter to limit the number of data repository associations returned in a response. If more data repository associations remain, a NextToken
value is returned in the response. In this case, send a later request with the NextToken
request parameter set to the value of NextToken
from the last response.
Returns the description of specific Amazon FSx for Lustre or Amazon File Cache data repository tasks, if one or more TaskIds
values are provided in the request, or if filters are used in the request. You can use filters to narrow the response to include just tasks for specific file systems or caches, or tasks in a specific lifecycle state. Otherwise, it returns all data repository tasks owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling.
When retrieving all tasks, you can paginate the response by using the optional MaxResults
parameter to limit the number of tasks returned in a response. If more tasks remain, a NextToken
value is returned in the response. In this case, send a later request with the NextToken
request parameter set to the value of NextToken
from the last response.
Returns the description of a specific Amazon File Cache resource, if a FileCacheIds
value is provided for that cache. Otherwise, it returns descriptions of all caches owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling.
When retrieving all cache descriptions, you can optionally specify the MaxResults
parameter to limit the number of descriptions in a response. If more cache descriptions remain, the operation returns a NextToken
value in the response. In this case, send a later request with the NextToken
request parameter set to the value of NextToken
from the last response.
This operation is used in an iterative process to retrieve a list of your cache descriptions. DescribeFileCaches
is called first without a NextToken
value. Then the operation continues to be called with the NextToken
parameter set to the value of the last NextToken
value until a response has no NextToken.
When using this operation, keep the following in mind:
The implementation might return fewer than MaxResults
cache descriptions while still including a NextToken
value.
The order of caches returned in the response of one DescribeFileCaches
call and the order of caches returned across the responses of a multicall iteration is unspecified.
Returns the DNS aliases that are associated with the specified Amazon FSx for Windows File Server file system. A history of all DNS aliases that have been associated with and disassociated from the file system is available in the list of AdministrativeAction provided in the DescribeFileSystems operation response.
", @@ -37,9 +37,9 @@ "RestoreVolumeFromSnapshot": "Returns an Amazon FSx for OpenZFS volume to the state saved by the specified snapshot.
", "TagResource": "Tags an Amazon FSx resource.
", "UntagResource": "This action removes a tag from an Amazon FSx resource.
", - "UpdateDataRepositoryAssociation": "Updates the configuration of an existing data repository association on an Amazon FSx for Lustre file system. Data repository associations are supported only for file systems with the Persistent_2
deployment type.
Updates the configuration of an existing data repository association on an Amazon FSx for Lustre file system. Data repository associations are supported for all file systems except for Scratch_1
deployment type.
Updates the configuration of an existing Amazon File Cache resource. You can update multiple properties in a single request.
", - "UpdateFileSystem": "Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request.
For Amazon FSx for Windows File Server file systems, you can update the following properties:
AuditLogConfiguration
AutomaticBackupRetentionDays
DailyAutomaticBackupStartTime
SelfManagedActiveDirectoryConfiguration
StorageCapacity
ThroughputCapacity
WeeklyMaintenanceStartTime
For Amazon FSx for Lustre file systems, you can update the following properties:
AutoImportPolicy
AutomaticBackupRetentionDays
DailyAutomaticBackupStartTime
DataCompressionType
LustreRootSquashConfiguration
StorageCapacity
WeeklyMaintenanceStartTime
For Amazon FSx for NetApp ONTAP file systems, you can update the following properties:
AutomaticBackupRetentionDays
DailyAutomaticBackupStartTime
DiskIopsConfiguration
FsxAdminPassword
StorageCapacity
ThroughputCapacity
WeeklyMaintenanceStartTime
For the Amazon FSx for OpenZFS file systems, you can update the following properties:
AutomaticBackupRetentionDays
CopyTagsToBackups
CopyTagsToVolumes
DailyAutomaticBackupStartTime
ThroughputCapacity
WeeklyMaintenanceStartTime
Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request.
For FSx for Windows File Server file systems, you can update the following properties:
AuditLogConfiguration
AutomaticBackupRetentionDays
DailyAutomaticBackupStartTime
SelfManagedActiveDirectoryConfiguration
StorageCapacity
ThroughputCapacity
WeeklyMaintenanceStartTime
For FSx for Lustre file systems, you can update the following properties:
AutoImportPolicy
AutomaticBackupRetentionDays
DailyAutomaticBackupStartTime
DataCompressionType
LustreRootSquashConfiguration
StorageCapacity
WeeklyMaintenanceStartTime
For FSx for ONTAP file systems, you can update the following properties:
AddRouteTableIds
AutomaticBackupRetentionDays
DailyAutomaticBackupStartTime
DiskIopsConfiguration
FsxAdminPassword
RemoveRouteTableIds
StorageCapacity
ThroughputCapacity
WeeklyMaintenanceStartTime
For FSx for OpenZFS file systems, you can update the following properties:
AutomaticBackupRetentionDays
CopyTagsToBackups
CopyTagsToVolumes
DailyAutomaticBackupStartTime
DiskIopsConfiguration
StorageCapacity
ThroughputCapacity
WeeklyMaintenanceStartTime
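As a sketch of how one of these updates looks through the Go client, the snippet below changes two of the Windows properties listed above on a single UpdateFileSystem call; the file system ID and target values are placeholders.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/fsx"
)

// updateWindowsFileSystem bumps throughput capacity and the backup retention
// window on an FSx for Windows File Server file system. Values are placeholders.
func updateWindowsFileSystem(svc *fsx.FSx) {
	_, err := svc.UpdateFileSystem(&fsx.UpdateFileSystemInput{
		FileSystemId: aws.String("fs-0123456789abcdef0"),
		WindowsConfiguration: &fsx.UpdateFileSystemWindowsConfiguration{
			ThroughputCapacity:           aws.Int64(64), // MBps
			AutomaticBackupRetentionDays: aws.Int64(14),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```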
Updates the name of an Amazon FSx for OpenZFS snapshot.
", "UpdateStorageVirtualMachine": "Updates an Amazon FSx for ONTAP storage virtual machine (SVM).
", "UpdateVolume": "Updates the configuration of an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS volume.
" @@ -51,7 +51,7 @@ "Backup$OwnerId": null, "FileCache$OwnerId": null, "FileCacheCreating$OwnerId": null, - "FileSystem$OwnerId": "The Amazon Web Services account that created the file system. If the file system was created by an Identity and Access Management (IAM) user, the Amazon Web Services account to which the IAM user belongs is the owner.
" + "FileSystem$OwnerId": "The Amazon Web Services account that created the file system. If the file system was created by a user in IAM Identity Center, the Amazon Web Services account to which the IAM user belongs is the owner.
" } }, "ActiveDirectoryBackupAttributes": { @@ -154,10 +154,10 @@ "ArchivePath": { "base": null, "refs": { - "CompletionReport$Path": "Required if Enabled
is set to true. Specifies the location of the report on the file system's linked S3 data repository. An absolute path that defines where the completion report will be stored in the destination location. The Path you provide must be located within the file system’s ExportPath. An example Path value is \"s3://myBucket/myExportPath/optionalPrefix\". The report provides the following information for each file in the report: FilePath, FileStatus, and ErrorCode. To learn more about a file system's ExportPath, see .
Required if Enabled is set to true. Specifies the location of the report on the file system's linked S3 data repository. An absolute path that defines where the completion report will be stored in the destination location. The Path you provide must be located within the file system’s ExportPath. An example Path value is \"s3://myBucket/myExportPath/optionalPrefix\". The report provides the following information for each file in the report: FilePath, FileStatus, and ErrorCode.
The path to the Amazon S3 data repository that will be linked to the file system. The path can be an S3 bucket or prefix in the format s3://myBucket/myPrefix/
. This path specifies where in the S3 data repository files will be imported from or exported to.
(Optional) The path to the Amazon S3 bucket (including the optional prefix) that you're using as the data repository for your Amazon FSx for Lustre file system. The root of your FSx for Lustre file system will be mapped to the root of the Amazon S3 bucket you select. An example is s3://import-bucket/optional-prefix
. If you specify a prefix after the Amazon S3 bucket name, only object keys with that prefix are loaded into the file system.
This parameter is not supported for file systems with the Persistent_2
deployment type. Instead, use CreateDataRepositoryAssociation
to create a data repository association to link your Lustre file system to a data repository.
(Optional) Available with Scratch
and Persistent_1
deployment types. Specifies the path in the Amazon S3 bucket where the root of your Amazon FSx file system is exported. The path must use the same Amazon S3 bucket as specified in ImportPath. You can provide an optional prefix to which new and changed data is to be exported from your Amazon FSx for Lustre file system. If an ExportPath
value is not provided, Amazon FSx sets a default export path, s3://import-bucket/FSxLustre[creation-timestamp]
. The timestamp is in UTC format, for example s3://import-bucket/FSxLustre20181105T222312Z
.
The Amazon S3 export bucket must be the same as the import bucket specified by ImportPath
. If you specify only a bucket name, such as s3://import-bucket
, you get a 1:1 mapping of file system objects to S3 bucket objects. This mapping means that the input data in S3 is overwritten on export. If you provide a custom prefix in the export path, such as s3://import-bucket/[custom-optional-prefix]
, Amazon FSx exports the contents of your file system to that export prefix in the Amazon S3 bucket.
This parameter is not supported for file systems with the Persistent_2
deployment type. Instead, use CreateDataRepositoryAssociation
to create a data repository association to link your Lustre file system to a data repository.
(Optional) The path to the Amazon S3 bucket (including the optional prefix) that you're using as the data repository for your Amazon FSx for Lustre file system. The root of your FSx for Lustre file system will be mapped to the root of the Amazon S3 bucket you select. An example is s3://import-bucket/optional-prefix
. If you specify a prefix after the Amazon S3 bucket name, only object keys with that prefix are loaded into the file system.
This parameter is not supported for file systems with a data repository association.
(Optional) Specifies the path in the Amazon S3 bucket where the root of your Amazon FSx file system is exported. The path must use the same Amazon S3 bucket as specified in ImportPath. You can provide an optional prefix to which new and changed data is to be exported from your Amazon FSx for Lustre file system. If an ExportPath
value is not provided, Amazon FSx sets a default export path, s3://import-bucket/FSxLustre[creation-timestamp]
. The timestamp is in UTC format, for example s3://import-bucket/FSxLustre20181105T222312Z
.
The Amazon S3 export bucket must be the same as the import bucket specified by ImportPath
. If you specify only a bucket name, such as s3://import-bucket
, you get a 1:1 mapping of file system objects to S3 bucket objects. This mapping means that the input data in S3 is overwritten on export. If you provide a custom prefix in the export path, such as s3://import-bucket/[custom-optional-prefix]
, Amazon FSx exports the contents of your file system to that export prefix in the Amazon S3 bucket.
This parameter is not supported for file systems with a data repository association.
The path to the data repository that will be linked to the cache or file system.
For Amazon File Cache, the path can be an NFS data repository that will be linked to the cache. The path can be in one of two formats:
If you are not using the DataRepositorySubdirectories
parameter, the path is to an NFS Export directory (or one of its subdirectories) in the format nfs://nfs-domain-name/exportpath
. You can therefore link a single NFS Export to a single data repository association.
If you are using the DataRepositorySubdirectories
parameter, the path is the domain name of the NFS file system in the format nfs://filer-domain-name
, which indicates the root of the subdirectories specified with the DataRepositorySubdirectories
parameter.
For Amazon File Cache, the path can be an S3 bucket or prefix in the format s3://myBucket/myPrefix/
.
For Amazon FSx for Lustre, the path can be an S3 bucket or prefix in the format s3://myBucket/myPrefix/
.
The import path to the Amazon S3 bucket (and optional prefix) that you're using as the data repository for your FSx for Lustre file system, for example s3://import-bucket/optional-prefix
. If a prefix is specified after the Amazon S3 bucket name, only object keys with that prefix are loaded into the file system.
The export path to the Amazon S3 bucket (and prefix) that you are using to store new and changed Lustre file system files in S3.
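A sketch of the ImportPath/ExportPath style of S3 linking described above (applicable to Scratch and Persistent_1 deployments), using the Go client; bucket, prefix, subnet, and capacity are placeholders.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/fsx"
)

// createScratchLustre creates a SCRATCH_2 file system whose root maps to an S3
// prefix. Bucket, prefix, and subnet are placeholders.
func createScratchLustre(svc *fsx.FSx) {
	_, err := svc.CreateFileSystem(&fsx.CreateFileSystemInput{
		FileSystemType:  aws.String("LUSTRE"),
		StorageCapacity: aws.Int64(1200), // GiB
		SubnetIds:       []*string{aws.String("subnet-0123456789abcdef0")},
		LustreConfiguration: &fsx.CreateFileSystemLustreConfiguration{
			DeploymentType: aws.String("SCRATCH_2"),
			ImportPath:     aws.String("s3://import-bucket/optional-prefix"),
			ExportPath:     aws.String("s3://import-bucket/optional-prefix"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```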
", @@ -175,14 +175,14 @@ } }, "AutoExportPolicy": { - "base": "Describes a data repository association's automatic export policy. The AutoExportPolicy
defines the types of updated objects on the file system that will be automatically exported to the data repository. As you create, modify, or delete files, Amazon FSx for Lustre automatically exports the defined changes asynchronously once your application finishes modifying the file.
This AutoExportPolicy
is supported only for Amazon FSx for Lustre file systems with the Persistent_2
deployment type.
Describes a data repository association's automatic export policy. The AutoExportPolicy
defines the types of updated objects on the file system that will be automatically exported to the data repository. As you create, modify, or delete files, Amazon FSx for Lustre automatically exports the defined changes asynchronously once your application finishes modifying the file.
The AutoExportPolicy
is only supported on Amazon FSx for Lustre file systems with a data repository association.
This parameter is not supported for Amazon File Cache.
", "S3DataRepositoryConfiguration$AutoExportPolicy": "Specifies the type of updated objects (new, changed, deleted) that will be automatically exported from your file system to the linked S3 bucket.
" } }, "AutoImportPolicy": { - "base": "Describes the data repository association's automatic import policy. The AutoImportPolicy defines how Amazon FSx keeps your file metadata and directory listings up to date by importing changes to your Amazon FSx for Lustre file system as you modify objects in a linked S3 bucket.
The AutoImportPolicy
is supported only for Amazon FSx for Lustre file systems with the Persistent_2
deployment type.
Describes the data repository association's automatic import policy. The AutoImportPolicy defines how Amazon FSx keeps your file metadata and directory listings up to date by importing changes to your Amazon FSx for Lustre file system as you modify objects in a linked S3 bucket.
The AutoImportPolicy
is only supported on Amazon FSx for Lustre file systems with a data repository association.
Specifies the type of updated objects (new, changed, deleted) that will be automatically imported from the linked S3 bucket to your file system.
" } @@ -190,9 +190,9 @@ "AutoImportPolicyType": { "base": null, "refs": { - "CreateFileSystemLustreConfiguration$AutoImportPolicy": " (Optional) Available with Scratch
and Persistent_1
deployment types. When you create your file system, your existing S3 objects appear as file and directory listings. Use this property to choose how Amazon FSx keeps your file and directory listings up to date as you add or modify objects in your linked S3 bucket. AutoImportPolicy
can have the following values:
NONE
- (Default) AutoImport is off. Amazon FSx only updates file and directory listings from the linked S3 bucket when the file system is created. FSx does not update file and directory listings for any new or changed objects after choosing this option.
NEW
- AutoImport is on. Amazon FSx automatically imports directory listings of any new objects added to the linked S3 bucket that do not currently exist in the FSx file system.
NEW_CHANGED
- AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket and any existing objects that are changed in the S3 bucket after you choose this option.
NEW_CHANGED_DELETED
- AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket, any existing objects that are changed in the S3 bucket, and any objects that were deleted in the S3 bucket.
For more information, see Automatically import updates from your S3 bucket.
This parameter is not supported for file systems with the Persistent_2
deployment type. Instead, use CreateDataRepositoryAssociation
to create a data repository association to link your Lustre file system to a data repository.
(Optional) When you create your file system, your existing S3 objects appear as file and directory listings. Use this parameter to choose how Amazon FSx keeps your file and directory listings up to date as you add or modify objects in your linked S3 bucket. AutoImportPolicy
can have the following values:
NONE
- (Default) AutoImport is off. Amazon FSx only updates file and directory listings from the linked S3 bucket when the file system is created. FSx does not update file and directory listings for any new or changed objects after choosing this option.
NEW
- AutoImport is on. Amazon FSx automatically imports directory listings of any new objects added to the linked S3 bucket that do not currently exist in the FSx file system.
NEW_CHANGED
- AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket and any existing objects that are changed in the S3 bucket after you choose this option.
NEW_CHANGED_DELETED
- AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket, any existing objects that are changed in the S3 bucket, and any objects that were deleted in the S3 bucket.
For more information, see Automatically import updates from your S3 bucket.
This parameter is not supported for file systems with a data repository association.
Describes the file system's linked S3 data repository's AutoImportPolicy
. The AutoImportPolicy configures how Amazon FSx keeps your file and directory listings up to date as you add or modify objects in your linked S3 bucket. AutoImportPolicy
can have the following values:
NONE
- (Default) AutoImport is off. Amazon FSx only updates file and directory listings from the linked S3 bucket when the file system is created. FSx does not update file and directory listings for any new or changed objects after choosing this option.
NEW
- AutoImport is on. Amazon FSx automatically imports directory listings of any new objects added to the linked S3 bucket that do not currently exist in the FSx file system.
NEW_CHANGED
- AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket and any existing objects that are changed in the S3 bucket after you choose this option.
NEW_CHANGED_DELETED
- AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket, any existing objects that are changed in the S3 bucket, and any objects that were deleted in the S3 bucket.
(Optional) When you create your file system, your existing S3 objects appear as file and directory listings. Use this property to choose how Amazon FSx keeps your file and directory listing up to date as you add or modify objects in your linked S3 bucket. AutoImportPolicy
can have the following values:
NONE
- (Default) AutoImport is off. Amazon FSx only updates file and directory listings from the linked S3 bucket when the file system is created. FSx does not update the file and directory listing for any new or changed objects after choosing this option.
NEW
- AutoImport is on. Amazon FSx automatically imports directory listings of any new objects added to the linked S3 bucket that do not currently exist in the FSx file system.
NEW_CHANGED
- AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket and any existing objects that are changed in the S3 bucket after you choose this option.
NEW_CHANGED_DELETED
- AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket, any existing objects that are changed in the S3 bucket, and any objects that were deleted in the S3 bucket.
The AutoImportPolicy
parameter is not supported for Lustre file systems with the Persistent_2
deployment type. Instead, use to update a data repository association on your Persistent_2
file system.
(Optional) When you create your file system, your existing S3 objects appear as file and directory listings. Use this property to choose how Amazon FSx keeps your file and directory listing up to date as you add or modify objects in your linked S3 bucket. AutoImportPolicy
can have the following values:
NONE
- (Default) AutoImport is off. Amazon FSx only updates file and directory listings from the linked S3 bucket when the file system is created. FSx does not update the file and directory listing for any new or changed objects after choosing this option.
NEW
- AutoImport is on. Amazon FSx automatically imports directory listings of any new objects added to the linked S3 bucket that do not currently exist in the FSx file system.
NEW_CHANGED
- AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket and any existing objects that are changed in the S3 bucket after you choose this option.
NEW_CHANGED_DELETED
- AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket, any existing objects that are changed in the S3 bucket, and any objects that were deleted in the S3 bucket.
This parameter is not supported for file systems with a data repository association.
" } }, "AutomaticBackupRetentionDays": { @@ -319,24 +319,24 @@ } }, "ClientRequestToken": { - "base": "(Optional) An idempotency token for resource creation, in a string of up to 64 ASCII characters. This token is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.
", + "base": "(Optional) An idempotency token for resource creation, in a string of up to 63 ASCII characters. This token is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.
", "refs": { "AssociateFileSystemAliasesRequest$ClientRequestToken": null, "CopyBackupRequest$ClientRequestToken": null, - "CreateBackupRequest$ClientRequestToken": "(Optional) A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.
", + "CreateBackupRequest$ClientRequestToken": "(Optional) A string of up to 63 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.
", "CreateDataRepositoryAssociationRequest$ClientRequestToken": null, "CreateDataRepositoryTaskRequest$ClientRequestToken": null, - "CreateFileCacheRequest$ClientRequestToken": "An idempotency token for resource creation, in a string of up to 64 ASCII characters. This token is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.
By using the idempotent operation, you can retry a CreateFileCache
operation without the risk of creating an extra cache. This approach can be useful when an initial call fails in a way that makes it unclear whether a cache was created. Examples are if a transport level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a cache, the client receives success as long as the parameters are the same.
A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.
", - "CreateFileSystemRequest$ClientRequestToken": "A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.
", + "CreateFileCacheRequest$ClientRequestToken": "An idempotency token for resource creation, in a string of up to 63 ASCII characters. This token is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.
By using the idempotent operation, you can retry a CreateFileCache
operation without the risk of creating an extra cache. This approach can be useful when an initial call fails in a way that makes it unclear whether a cache was created. Examples are if a transport level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a cache, the client receives success as long as the parameters are the same.
A string of up to 63 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.
", + "CreateFileSystemRequest$ClientRequestToken": "A string of up to 63 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.
", "CreateSnapshotRequest$ClientRequestToken": null, "CreateStorageVirtualMachineRequest$ClientRequestToken": null, "CreateVolumeFromBackupRequest$ClientRequestToken": null, "CreateVolumeRequest$ClientRequestToken": null, - "DeleteBackupRequest$ClientRequestToken": "A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent deletion. This parameter is automatically filled on your behalf when using the CLI or SDK.
", + "DeleteBackupRequest$ClientRequestToken": "A string of up to 63 ASCII characters that Amazon FSx uses to ensure idempotent deletion. This parameter is automatically filled on your behalf when using the CLI or SDK.
", "DeleteDataRepositoryAssociationRequest$ClientRequestToken": null, "DeleteFileCacheRequest$ClientRequestToken": null, - "DeleteFileSystemRequest$ClientRequestToken": "A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent deletion. This token is automatically filled on your behalf when using the Command Line Interface (CLI) or an Amazon Web Services SDK.
", + "DeleteFileSystemRequest$ClientRequestToken": "A string of up to 63 ASCII characters that Amazon FSx uses to ensure idempotent deletion. This token is automatically filled on your behalf when using the Command Line Interface (CLI) or an Amazon Web Services SDK.
", "DeleteSnapshotRequest$ClientRequestToken": null, "DeleteStorageVirtualMachineRequest$ClientRequestToken": null, "DeleteVolumeRequest$ClientRequestToken": null, @@ -346,7 +346,7 @@ "RestoreVolumeFromSnapshotRequest$ClientRequestToken": null, "UpdateDataRepositoryAssociationRequest$ClientRequestToken": null, "UpdateFileCacheRequest$ClientRequestToken": null, - "UpdateFileSystemRequest$ClientRequestToken": "A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent updates. This string is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.
", + "UpdateFileSystemRequest$ClientRequestToken": "A string of up to 63 ASCII characters that Amazon FSx uses to ensure idempotent updates. This string is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.
", "UpdateSnapshotRequest$ClientRequestToken": null, "UpdateStorageVirtualMachineRequest$ClientRequestToken": null, "UpdateVolumeRequest$ClientRequestToken": null @@ -445,7 +445,7 @@ } }, "CreateFileSystemLustreConfiguration": { - "base": "The Lustre configuration for the file system being created.
The following parameters are not supported for file systems with the Persistent_2
deployment type. Instead, use CreateDataRepositoryAssociation
to create a data repository association to link your Lustre file system to a data repository.
AutoImportPolicy
ExportPath
ImportedChunkSize
ImportPath
The Lustre configuration for the file system being created.
The following parameters are not supported for file systems with a data repository association created with .
AutoImportPolicy
ExportPath
ImportedChunkSize
ImportPath
The configuration of a data repository association that links an Amazon FSx for Lustre file system to an Amazon S3 bucket or an Amazon File Cache resource to an Amazon S3 bucket or an NFS file system. The data repository association configuration object is returned in the response of the following operations:
CreateDataRepositoryAssociation
UpdateDataRepositoryAssociation
DescribeDataRepositoryAssociations
Data repository associations are supported only for an Amazon FSx for Lustre file system with the Persistent_2
deployment type and for an Amazon File Cache resource.
The configuration of a data repository association that links an Amazon FSx for Lustre file system to an Amazon S3 bucket or an Amazon File Cache resource to an Amazon S3 bucket or an NFS file system. The data repository association configuration object is returned in the response of the following operations:
CreateDataRepositoryAssociation
UpdateDataRepositoryAssociation
DescribeDataRepositoryAssociations
Data repository associations are supported on Amazon File Cache resources and all Amazon FSx for Lustre file systems excluding Scratch_1
deployment types.
The response object returned after the data repository association is created.
", "DataRepositoryAssociations$member": null, @@ -634,7 +634,7 @@ } }, "DataRepositoryConfiguration": { - "base": "The data repository configuration object for Lustre file systems returned in the response of the CreateFileSystem
operation.
This data type is not supported for file systems with the Persistent_2
deployment type. Instead, use .
The data repository configuration object for Lustre file systems returned in the response of the CreateFileSystem
operation.
This data type is not supported on file systems with a data repository association. For file systems with a data repository association, see .
", "refs": { "LustreFileSystemConfiguration$DataRepositoryConfiguration": null } @@ -742,7 +742,7 @@ "base": null, "refs": { "CreateDataRepositoryTaskRequest$Type": "Specifies the type of data repository task to create.
", - "DataRepositoryTask$Type": "The type of data repository task.
EXPORT_TO_REPOSITORY
tasks export from your Amazon FSx for Lustre file system to a linked data repository.
IMPORT_METADATA_FROM_REPOSITORY
tasks import metadata changes from a linked S3 bucket to your Amazon FSx for Lustre file system.
AUTO_RELEASE_DATA
tasks automatically release files from an Amazon File Cache resource.
The type of data repository task.
EXPORT_TO_REPOSITORY
tasks export from your Amazon FSx for Lustre file system to a linked data repository.
IMPORT_METADATA_FROM_REPOSITORY
tasks import metadata changes from a linked S3 bucket to your Amazon FSx for Lustre file system.
AUTO_RELEASE_DATA
tasks automatically release files from an Amazon File Cache resource.
RELEASE_DATA_FROM_FILESYSTEM
tasks are not supported.
A Boolean value indicating whether tags for the file system should be copied to backups. This value defaults to false
. If it's set to true
, all tags for the file system are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is true
, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.
A Boolean value indicating whether tags for the file system should be copied to volumes. This value defaults to false
. If it's set to true
, all tags for the file system are copied to volumes where the user doesn't specify tags. If this value is true
, and you specify one or more tags, only the specified tags are copied to volumes. If you specify one or more tags when creating the volume, no tags are copied from the file system, regardless of this value.
A boolean flag indicating whether tags for the file system should be copied to backups. This value defaults to false. If it's set to true, all tags for the file system are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.
", - "CreateOntapVolumeConfiguration$StorageEfficiencyEnabled": "Set to true to enable deduplication, compression, and compaction storage efficiency features on the volume.
", + "CreateOntapVolumeConfiguration$StorageEfficiencyEnabled": "Set to true to enable deduplication, compression, and compaction storage efficiency features on the volume, or set to false to disable them. This parameter is required.
", "CreateOntapVolumeConfiguration$CopyTagsToBackups": "A boolean flag indicating whether tags for the volume should be copied to backups. This value defaults to false. If it's set to true, all tags for the volume are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the volume, regardless of this value.
", "CreateOpenZFSVolumeConfiguration$CopyTagsToSnapshots": "A Boolean value indicating whether tags for the volume should be copied to snapshots. This value defaults to false
. If it's set to true
, all tags for the volume are copied to snapshots where the user doesn't specify tags. If this value is true
, and you specify one or more tags, only the specified tags are copied to snapshots. If you specify one or more tags when creating the snapshot, no tags are copied from the volume, regardless of this value.
Set SkipFinalBackup
to false if you want to take a final backup of the file system you are deleting. By default, Amazon FSx will not take a final backup on your behalf when the DeleteFileSystem
operation is invoked. (Default = true)
The fsx:CreateBackup
permission is required if you set SkipFinalBackup
to false
in order to delete the file system and take a final backup.
(Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system will be created. By default in the Amazon FSx API, Amazon FSx selects an unused IP address range for you from the 198.19.* range. By default in the Amazon FSx console, Amazon FSx chooses the last 64 IP addresses from the VPC’s primary CIDR range to use as the endpoint IP address range for the file system. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables.
", - "OntapFileSystemConfiguration$EndpointIpAddressRange": "(Multi-AZ only) The IP address range in which the endpoints to access your file system are created.
The Endpoint IP address range you select for your file system must exist outside the VPC's CIDR range and must be at least /30 or larger. If you do not specify this optional parameter, Amazon FSx will automatically select a CIDR block for you.
(Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system will be created. By default in the Amazon FSx API, Amazon FSx selects an unused IP address range for you from the 198.19.* range. By default in the Amazon FSx console, Amazon FSx chooses the last 64 IP addresses from the VPC’s primary CIDR range to use as the endpoint IP address range for the file system. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables, as long as they don't overlap with any subnet.
", + "OntapFileSystemConfiguration$EndpointIpAddressRange": "(Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system will be created. By default in the Amazon FSx API, Amazon FSx selects an unused IP address range for you from the 198.19.* range. By default in the Amazon FSx console, Amazon FSx chooses the last 64 IP addresses from the VPC’s primary CIDR range to use as the endpoint IP address range for the file system. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables.
" } }, "JunctionPath": { "base": null, "refs": { - "CreateOntapVolumeConfiguration$JunctionPath": "Specifies the location in the SVM's namespace where the volume is mounted. The JunctionPath
must have a leading forward slash, such as /vol3
.
Specifies the location in the SVM's namespace where the volume is mounted. This parameter is required. The JunctionPath
must have a leading forward slash, such as /vol3
.
Specifies the directory that network-attached storage (NAS) clients use to mount the volume, along with the storage virtual machine (SVM) Domain Name System (DNS) name or IP address. You can create a JunctionPath
directly below a parent volume junction or on a directory within a volume. A JunctionPath
for a volume named vol3
might be /vol1/vol2/vol3
, or /vol1/dir2/vol3
, or even /dir1/dir2/vol3
.
Specifies the location in the SVM's namespace where the volume is mounted. The JunctionPath
must have a leading forward slash, such as /vol3
.
(Optional) Choose SCRATCH_1
and SCRATCH_2
deployment types when you need temporary storage and shorter-term processing of data. The SCRATCH_2
deployment type provides in-transit encryption of data and higher burst throughput capacity than SCRATCH_1
.
Choose PERSISTENT_1
for longer-term storage and for throughput-focused workloads that aren’t latency-sensitive. PERSISTENT_1
supports encryption of data in transit, and is available in all Amazon Web Services Regions in which FSx for Lustre is available.
Choose PERSISTENT_2
for longer-term storage and for latency-sensitive workloads that require the highest levels of IOPS/throughput. PERSISTENT_2
supports SSD storage, and offers higher PerUnitStorageThroughput
(up to 1000 MB/s/TiB). PERSISTENT_2
is available in a limited number of Amazon Web Services Regions. For more information, and an up-to-date list of Amazon Web Services Regions in which PERSISTENT_2
is available, see File system deployment options for FSx for Lustre in the Amazon FSx for Lustre User Guide.
If you choose PERSISTENT_2
, and you set FileSystemTypeVersion
to 2.10
, the CreateFileSystem
operation fails.
Encryption of data in transit is automatically turned on when you access SCRATCH_2
, PERSISTENT_1
and PERSISTENT_2
file systems from Amazon EC2 instances that support automatic encryption in the Amazon Web Services Regions where they are available. For more information about encryption in transit for FSx for Lustre file systems, see Encrypting data in transit in the Amazon FSx for Lustre User Guide.
(Default = SCRATCH_1
)
(Optional) Choose SCRATCH_1
and SCRATCH_2
deployment types when you need temporary storage and shorter-term processing of data. The SCRATCH_2
deployment type provides in-transit encryption of data and higher burst throughput capacity than SCRATCH_1
.
Choose PERSISTENT_1
for longer-term storage and for throughput-focused workloads that aren’t latency-sensitive. PERSISTENT_1
supports encryption of data in transit, and is available in all Amazon Web Services Regions in which FSx for Lustre is available.
Choose PERSISTENT_2
for longer-term storage and for latency-sensitive workloads that require the highest levels of IOPS/throughput. PERSISTENT_2
supports SSD storage, and offers higher PerUnitStorageThroughput
(up to 1000 MB/s/TiB). PERSISTENT_2
is available in a limited number of Amazon Web Services Regions. For more information, and an up-to-date list of Amazon Web Services Regions in which PERSISTENT_2
is available, see File system deployment options for FSx for Lustre in the Amazon FSx for Lustre User Guide.
If you choose PERSISTENT_2
, and you set FileSystemTypeVersion
to 2.10
, the CreateFileSystem
operation fails.
Encryption of data in transit is automatically turned on when you access SCRATCH_2
, PERSISTENT_1
and PERSISTENT_2
file systems from Amazon EC2 instances that support automatic encryption in the Amazon Web Services Regions where they are available. For more information about encryption in transit for FSx for Lustre file systems, see Encrypting data in transit in the Amazon FSx for Lustre User Guide.
(Default = SCRATCH_1
)
The deployment type of the FSx for Lustre file system. Scratch deployment type is designed for temporary storage and shorter-term processing of data.
SCRATCH_1
and SCRATCH_2
deployment types are best suited for when you need temporary storage and shorter-term processing of data. The SCRATCH_2
deployment type provides in-transit encryption of data and higher burst throughput capacity than SCRATCH_1
.
The PERSISTENT_1
and PERSISTENT_2
deployment type is used for longer-term storage and workloads and encryption of data in transit. PERSISTENT_2
is built on Lustre v2.12 and offers higher PerUnitStorageThroughput
(up to 1000 MB/s/TiB) along with a lower minimum storage capacity requirement (600 GiB). To learn more about FSx for Lustre deployment types, see FSx for Lustre deployment options.
The default is SCRATCH_1
.
For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.
The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.
", - "CreateFileSystemLustreConfiguration$ImportedFileChunkSize": "(Optional) For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.
The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.
This parameter is not supported for file systems with the Persistent_2
deployment type. Instead, use CreateDataRepositoryAssociation
to create a data repository association to link your Lustre file system to a data repository.
(Optional) For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.
The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.
This parameter is not supported for file systems with a data repository association.
For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system or cache.
The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.
", "DataRepositoryConfiguration$ImportedFileChunkSize": "For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.
The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.
", "UpdateDataRepositoryAssociationRequest$ImportedFileChunkSize": "For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.
The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.
" @@ -2282,7 +2282,7 @@ "FileCache$StorageCapacity": "The storage capacity of the cache in gibibytes (GiB).
", "FileCacheCreating$StorageCapacity": "The storage capacity of the cache in gibibytes (GiB).
", "FileSystem$StorageCapacity": "The storage capacity of the file system in gibibytes (GiB).
", - "UpdateFileSystemRequest$StorageCapacity": "Use this parameter to increase the storage capacity of an Amazon FSx for Windows File Server, Amazon FSx for Lustre, or Amazon FSx for NetApp ONTAP file system. Specifies the storage capacity target value, in GiB, to increase the storage capacity for the file system that you're updating.
You can't make a storage capacity increase request if there is an existing storage capacity increase request in progress.
For Windows file systems, the storage capacity target value must be at least 10 percent greater than the current storage capacity value. To increase storage capacity, the file system must have at least 16 MBps of throughput capacity. For more information, see Managing storage capacity in the Amazon FSx for Windows File Server User Guide.
For Lustre file systems, the storage capacity target value can be the following:
For SCRATCH_2
, PERSISTENT_1
, and PERSISTENT_2 SSD
deployment types, valid values are in multiples of 2400 GiB. The value must be greater than the current storage capacity.
For PERSISTENT HDD
file systems, valid values are multiples of 6000 GiB for 12-MBps throughput per TiB file systems and multiples of 1800 GiB for 40-MBps throughput per TiB file systems. The values must be greater than the current storage capacity.
For SCRATCH_1
file systems, you can't increase the storage capacity.
For more information, see Managing storage and throughput capacity in the Amazon FSx for Lustre User Guide.
For ONTAP file systems, the storage capacity target value must be at least 10 percent greater than the current storage capacity value. For more information, see Managing storage capacity and provisioned IOPS in the Amazon FSx for NetApp ONTAP User Guide.
" + "UpdateFileSystemRequest$StorageCapacity": "Use this parameter to increase the storage capacity of an FSx for Windows File Server, FSx for Lustre, FSx for OpenZFS, or FSx for ONTAP file system. Specifies the storage capacity target value, in GiB, to increase the storage capacity for the file system that you're updating.
You can't make a storage capacity increase request if there is an existing storage capacity increase request in progress.
For Lustre file systems, the storage capacity target value can be the following:
For SCRATCH_2
, PERSISTENT_1
, and PERSISTENT_2 SSD
deployment types, valid values are in multiples of 2400 GiB. The value must be greater than the current storage capacity.
For PERSISTENT HDD
file systems, valid values are multiples of 6000 GiB for 12-MBps throughput per TiB file systems and multiples of 1800 GiB for 40-MBps throughput per TiB file systems. The values must be greater than the current storage capacity.
For SCRATCH_1
file systems, you can't increase the storage capacity.
For more information, see Managing storage and throughput capacity in the FSx for Lustre User Guide.
For FSx for OpenZFS file systems, the storage capacity target value must be at least 10 percent greater than the current storage capacity value. For more information, see Managing storage capacity in the FSx for OpenZFS User Guide.
For Windows file systems, the storage capacity target value must be at least 10 percent greater than the current storage capacity value. To increase storage capacity, the file system must have at least 16 MBps of throughput capacity. For more information, see Managing storage capacity in the Amazon FSx for Windows File Server User Guide.
For ONTAP file systems, the storage capacity target value must be at least 10 percent greater than the current storage capacity value. For more information, see Managing storage capacity and provisioned IOPS in the Amazon FSx for NetApp ONTAP User Guide.
" } }, "StorageType": { @@ -2691,7 +2691,7 @@ "VolumeCapacity": { "base": null, "refs": { - "CreateOntapVolumeConfiguration$SizeInMegabytes": "Specifies the size of the volume, in megabytes (MB), that you are creating.
", + "CreateOntapVolumeConfiguration$SizeInMegabytes": "Specifies the size of the volume, in megabytes (MB), that you are creating. Provide any whole number in the range of 20–104857600 to specify the size of the volume.
", "OntapVolumeConfiguration$SizeInMegabytes": "The configured size of the volume, in megabytes (MBs).
", "UpdateOntapVolumeConfiguration$SizeInMegabytes": "Specifies the size of the volume in megabytes.
" } diff --git a/models/apis/fsx/2018-03-01/endpoint-rule-set-1.json b/models/apis/fsx/2018-03-01/endpoint-rule-set-1.json index c34da7df90e..b5cc1311213 100644 --- a/models/apis/fsx/2018-03-01/endpoint-rule-set-1.json +++ b/models/apis/fsx/2018-03-01/endpoint-rule-set-1.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -131,307 +111,238 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] + "ref": "Region" } - ] - }, + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://fsx-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, + }, { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true ] } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "stringEquals", + "fn": "booleanEquals", "argv": [ + true, { - 
"ref": "Region" - }, - "prod-ca-central-1" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] - } - ], - "endpoint": { - "url": "https://fsx-fips.ca-central-1.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ + }, { - "fn": "stringEquals", + "fn": "booleanEquals", "argv": [ + true, { - "ref": "Region" - }, - "prod-us-east-1" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } ] } ], - "endpoint": { - "url": "https://fsx-fips.us-east-1.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ + "type": "tree", + "rules": [ { - "fn": "stringEquals", - "argv": [ + "conditions": [], + "type": "tree", + "rules": [ { - "ref": "Region" - }, - "prod-us-east-2" + "conditions": [], + "endpoint": { + "url": "https://fsx-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } ] } - ], - "endpoint": { - "url": "https://fsx-fips.us-east-2.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "stringEquals", + "fn": "booleanEquals", "argv": [ + true, { - "ref": "Region" - }, - "prod-us-west-1" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] } ], - "endpoint": { - "url": "https://fsx-fips.us-west-1.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ + "type": "tree", + "rules": [ { - "fn": "stringEquals", - "argv": [ + "conditions": [], + "type": "tree", + "rules": [ { - "ref": "Region" - }, - "prod-us-west-2" + "conditions": [], + "endpoint": { + "url": "https://fsx-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } ] } - ], - "endpoint": { - "url": "https://fsx-fips.us-west-2.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "stringEquals", + "fn": "booleanEquals", "argv": [ + true, { - "ref": "Region" - }, - "prod-us-gov-east-1" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } ] } ], - "endpoint": { - "url": "https://fsx-fips.us-gov-east-1.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ + "type": "tree", + "rules": [ { - "fn": "stringEquals", - "argv": [ + "conditions": [], + "type": "tree", + "rules": [ { - "ref": "Region" - }, - "prod-us-gov-west-1" + "conditions": [], + "endpoint": { + "url": "https://fsx.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } ] } - ], - "endpoint": { - "url": "https://fsx-fips.us-gov-west-1.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + ] }, { "conditions": [], - "endpoint": { - "url": 
"https://fsx-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [], + "type": "tree", + "rules": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] + "conditions": [], + "endpoint": { + "url": "https://fsx.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://fsx.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://fsx.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/models/apis/fsx/2018-03-01/endpoint-tests-1.json b/models/apis/fsx/2018-03-01/endpoint-tests-1.json index 7893d02ee7b..283b2995923 100644 --- a/models/apis/fsx/2018-03-01/endpoint-tests-1.json +++ b/models/apis/fsx/2018-03-01/endpoint-tests-1.json @@ -1,1455 +1,558 @@ { "testCases": [ { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.ap-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-south-1" - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-south-1" - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.ap-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-south-1" - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-south-1" - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.eu-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "eu-south-1" - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-south-1" - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.eu-south-1.api.aws" - } - }, - 
"params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-south-1" - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-south-1" - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "us-gov-east-1" - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "us-gov-east-1" - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.us-gov-east-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "us-gov-east-1" - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "us-gov-east-1" - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.me-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "me-central-1" - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.me-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "me-central-1" - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.me-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "me-central-1" - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx.me-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "me-central-1" - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.ca-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "ca-central-1" - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "ca-central-1" - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.ca-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "ca-central-1" - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx.ca-central-1.amazonaws.com" - } 
- }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "ca-central-1" - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "eu-central-1" - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-central-1" - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.eu-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-central-1" - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-central-1" - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.us-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "us-west-1" - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "us-west-1" - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.us-west-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "us-west-1" - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx.us-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "us-west-1" - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.us-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "us-west-2" - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "us-west-2" - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.us-west-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "us-west-2" - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx.us-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "us-west-2" - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.af-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": 
"af-south-1" - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.af-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "af-south-1" - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.af-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "af-south-1" - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx.af-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "af-south-1" - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "eu-north-1" - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-north-1" - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.eu-north-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-north-1" - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-north-1" - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "eu-west-3" - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-west-3" - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.eu-west-3.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-west-3" - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-west-3" - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "eu-west-2" - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-west-2" - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and 
DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-west-2" - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-west-2" - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "eu-west-1" - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-west-1" - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.eu-west-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-west-1" - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-west-1" - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.ap-northeast-3.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-northeast-3" - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-northeast-3" - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.ap-northeast-3.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-northeast-3" - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-northeast-3" - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-northeast-2" - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-northeast-2" - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.ap-northeast-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-northeast-2" - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack 
disabled", - "expect": { - "endpoint": { - "url": "https://fsx.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-northeast-2" - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.ap-northeast-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-northeast-1" - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-northeast-1" - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.ap-northeast-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-northeast-1" - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-northeast-1" - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.me-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "me-south-1" - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fsx-fips.me-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "me-south-1" - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.me-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "me-south-1" - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.me-south-1.amazonaws.com" + "url": "https://fsx.af-south-1.amazonaws.com" } }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "me-south-1" + "Region": "af-south-1", + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.sa-east-1.api.aws" + "url": "https://fsx.ap-east-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "sa-east-1" + "UseFIPS": false, + "Region": "ap-east-1", + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.sa-east-1.amazonaws.com" + "url": "https://fsx.ap-northeast-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "sa-east-1" + "UseFIPS": false, + "Region": "ap-northeast-1", + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack 
enabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.sa-east-1.api.aws" + "url": "https://fsx.ap-northeast-2.amazonaws.com" } }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "sa-east-1" + "Region": "ap-northeast-2", + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.sa-east-1.amazonaws.com" + "url": "https://fsx.ap-northeast-3.amazonaws.com" } }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "sa-east-1" + "Region": "ap-northeast-3", + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.ap-east-1.api.aws" + "url": "https://fsx.ap-south-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-east-1" + "UseFIPS": false, + "Region": "ap-south-1", + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.ap-east-1.amazonaws.com" + "url": "https://fsx.ap-southeast-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-east-1" + "UseFIPS": false, + "Region": "ap-southeast-1", + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.ap-east-1.api.aws" + "url": "https://fsx.ap-southeast-2.amazonaws.com" } }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-east-1" + "Region": "ap-southeast-2", + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.ap-east-1.amazonaws.com" + "url": "https://fsx.ca-central-1.amazonaws.com" } }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-east-1" + "Region": "ca-central-1", + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://fsx-fips.ca-central-1.amazonaws.com" } }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "cn-north-1" + "Region": "ca-central-1", + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.cn-north-1.amazonaws.com.cn" + "url": "https://fsx.eu-central-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "cn-north-1" + "UseFIPS": false, + "Region": "eu-central-1", + "UseDualStack": false } }, { - "documentation": 
"For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://fsx.eu-north-1.amazonaws.com" } }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "cn-north-1" + "Region": "eu-north-1", + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.cn-north-1.amazonaws.com.cn" + "url": "https://fsx.eu-south-1.amazonaws.com" } }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "cn-north-1" + "Region": "eu-south-1", + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.us-gov-west-1.api.aws" + "url": "https://fsx.eu-west-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "us-gov-west-1" + "UseFIPS": false, + "Region": "eu-west-1", + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.us-gov-west-1.amazonaws.com" + "url": "https://fsx.eu-west-2.amazonaws.com" } }, "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "us-gov-west-1" + "UseFIPS": false, + "Region": "eu-west-2", + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.us-gov-west-1.api.aws" + "url": "https://fsx.eu-west-3.amazonaws.com" } }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "us-gov-west-1" + "Region": "eu-west-3", + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.us-gov-west-1.amazonaws.com" + "url": "https://fsx.me-south-1.amazonaws.com" } }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "us-gov-west-1" + "Region": "me-south-1", + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.ap-southeast-1.api.aws" + "url": "https://fsx.sa-east-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-southeast-1" + "UseFIPS": false, + "Region": "sa-east-1", + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.ap-southeast-1.amazonaws.com" + "url": "https://fsx.us-east-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-southeast-1" + "UseFIPS": false, + 
"Region": "us-east-1", + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.ap-southeast-1.api.aws" + "url": "https://fsx-fips.us-east-1.amazonaws.com" } }, "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-southeast-1" + "UseFIPS": true, + "Region": "us-east-1", + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.ap-southeast-1.amazonaws.com" + "url": "https://fsx.us-east-2.amazonaws.com" } }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-southeast-1" + "Region": "us-east-2", + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.ap-southeast-2.api.aws" + "url": "https://fsx-fips.us-east-2.amazonaws.com" } }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-southeast-2" + "Region": "us-east-2", + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.ap-southeast-2.amazonaws.com" + "url": "https://fsx.us-west-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-southeast-2" + "UseFIPS": false, + "Region": "us-west-1", + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.ap-southeast-2.api.aws" + "url": "https://fsx-fips.us-west-1.amazonaws.com" } }, "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-southeast-2" + "UseFIPS": true, + "Region": "us-west-1", + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.ap-southeast-2.amazonaws.com" + "url": "https://fsx.us-west-2.amazonaws.com" } }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-southeast-2" + "Region": "us-west-2", + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": "https://fsx-fips.us-west-2.amazonaws.com" + } }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "us-iso-east-1" + "Region": "us-west-2", + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://fsx-fips.us-iso-east-1.c2s.ic.gov" + "url": 
"https://fsx-fips.us-east-1.api.aws" } }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "us-iso-east-1" + "Region": "us-east-1", + "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" + "endpoint": { + "url": "https://fsx.us-east-1.api.aws" + } }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "us-iso-east-1" + "Region": "us-east-1", + "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.us-iso-east-1.c2s.ic.gov" + "url": "https://fsx.cn-north-1.amazonaws.com.cn" } }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "us-iso-east-1" + "Region": "cn-north-1", + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.ap-southeast-3.api.aws" + "url": "https://fsx.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-southeast-3" + "UseFIPS": false, + "Region": "cn-northwest-1", + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://fsx-fips.ap-southeast-3.amazonaws.com" + "url": "https://fsx-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-southeast-3" + "Region": "cn-north-1", + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.ap-southeast-3.api.aws" + "url": "https://fsx-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-southeast-3" + "UseFIPS": true, + "Region": "cn-north-1", + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://fsx.ap-southeast-3.amazonaws.com" + "url": "https://fsx.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-southeast-3" + "Region": "cn-north-1", + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.us-east-1.api.aws" + "url": "https://fsx.us-gov-east-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "us-east-1" + "UseFIPS": false, + "Region": "us-gov-east-1", + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 
with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.us-east-1.amazonaws.com" + "url": "https://fsx-fips.us-gov-east-1.amazonaws.com" } }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "us-east-1" - } - }, - { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.us-east-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "us-east-1" + "Region": "us-gov-east-1", + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.us-east-1.amazonaws.com" + "url": "https://fsx.us-gov-west-1.amazonaws.com" } }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "us-east-1" + "Region": "us-gov-west-1", + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.us-east-2.api.aws" + "url": "https://fsx-fips.us-gov-west-1.amazonaws.com" } }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "us-east-2" + "Region": "us-gov-west-1", + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://fsx-fips.us-east-2.amazonaws.com" + "url": "https://fsx-fips.us-gov-east-1.api.aws" } }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "us-east-2" - } - }, - { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://fsx.us-east-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "us-east-2" + "Region": "us-gov-east-1", + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://fsx.us-east-2.amazonaws.com" + "url": "https://fsx.us-gov-east-1.api.aws" } }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "us-east-2" + "Region": "us-gov-east-1", + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://fsx-fips.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "cn-northwest-1" + "Region": "us-iso-east-1", + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx-fips.cn-northwest-1.amazonaws.com.cn" + "url": "https://fsx-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "cn-northwest-1" + "Region": "us-iso-east-1", + 
"UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://fsx.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "cn-northwest-1" + "Region": "us-iso-east-1", + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fsx.cn-northwest-1.amazonaws.com.cn" + "url": "https://fsx.us-iso-east-1.c2s.ic.gov" } }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "cn-northwest-1" + "Region": "us-iso-east-1", + "UseDualStack": false } }, { @@ -1459,8 +562,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "us-isob-east-1" + "Region": "us-isob-east-1", + "UseDualStack": true } }, { @@ -1472,8 +575,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "us-isob-east-1" + "Region": "us-isob-east-1", + "UseDualStack": false } }, { @@ -1483,8 +586,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "us-isob-east-1" + "Region": "us-isob-east-1", + "UseDualStack": true } }, { @@ -1496,12 +599,26 @@ }, "params": { "UseFIPS": false, + "Region": "us-isob-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "Region": "us-east-1", "UseDualStack": false, - "Region": "us-isob-east-1" + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" @@ -1510,7 +627,6 @@ "params": { "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -1521,8 +637,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, "Region": "us-east-1", + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1533,10 +649,16 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, "Region": "us-east-1", + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/models/apis/lambda/2015-03-31/api-2.json b/models/apis/lambda/2015-03-31/api-2.json index 663a119585e..2a44cf5aa41 100644 --- a/models/apis/lambda/2015-03-31/api-2.json +++ b/models/apis/lambda/2015-03-31/api-2.json @@ -603,6 +603,43 @@ ], "deprecated":true }, + "InvokeWithResponseStream":{ + "name":"InvokeWithResponseStream", + "http":{ + "method":"POST", + "requestUri":"/2021-11-15/functions/{FunctionName}/response-streaming-invocations" + }, + "input":{"shape":"InvokeWithResponseStreamRequest"}, + "output":{"shape":"InvokeWithResponseStreamResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestContentException"}, + {"shape":"RequestTooLargeException"}, + 
{"shape":"UnsupportedMediaTypeException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"EC2UnexpectedException"}, + {"shape":"SubnetIPAddressLimitReachedException"}, + {"shape":"ENILimitReachedException"}, + {"shape":"EFSMountConnectivityException"}, + {"shape":"EFSMountFailureException"}, + {"shape":"EFSMountTimeoutException"}, + {"shape":"EFSIOException"}, + {"shape":"EC2ThrottledException"}, + {"shape":"EC2AccessDeniedException"}, + {"shape":"InvalidSubnetIDException"}, + {"shape":"InvalidSecurityGroupIDException"}, + {"shape":"InvalidZipFileException"}, + {"shape":"KMSDisabledException"}, + {"shape":"KMSInvalidStateException"}, + {"shape":"KMSAccessDeniedException"}, + {"shape":"KMSNotFoundException"}, + {"shape":"InvalidRuntimeException"}, + {"shape":"ResourceConflictException"}, + {"shape":"ResourceNotReadyException"} + ] + }, "ListAliases":{ "name":"ListAliases", "http":{ @@ -1513,7 +1550,8 @@ "locationName":"Qualifier" }, "AuthType":{"shape":"FunctionUrlAuthType"}, - "Cors":{"shape":"Cors"} + "Cors":{"shape":"Cors"}, + "InvokeMode":{"shape":"InvokeMode"} } }, "CreateFunctionUrlConfigResponse":{ @@ -1529,7 +1567,8 @@ "FunctionArn":{"shape":"FunctionArn"}, "AuthType":{"shape":"FunctionUrlAuthType"}, "Cors":{"shape":"Cors"}, - "CreationTime":{"shape":"Timestamp"} + "CreationTime":{"shape":"Timestamp"}, + "InvokeMode":{"shape":"InvokeMode"} } }, "DatabaseName":{ @@ -2091,7 +2130,8 @@ "CreationTime":{"shape":"Timestamp"}, "LastModifiedTime":{"shape":"Timestamp"}, "Cors":{"shape":"Cors"}, - "AuthType":{"shape":"FunctionUrlAuthType"} + "AuthType":{"shape":"FunctionUrlAuthType"}, + "InvokeMode":{"shape":"InvokeMode"} } }, "FunctionUrlConfigList":{ @@ -2295,7 +2335,8 @@ "AuthType":{"shape":"FunctionUrlAuthType"}, "Cors":{"shape":"Cors"}, "CreationTime":{"shape":"Timestamp"}, - "LastModifiedTime":{"shape":"Timestamp"} + "LastModifiedTime":{"shape":"Timestamp"}, + "InvokeMode":{"shape":"InvokeMode"} } }, "GetLayerVersionByArnRequest":{ @@ -2641,6 +2682,94 @@ }, "deprecated":true }, + "InvokeMode":{ + "type":"string", + "enum":[ + "BUFFERED", + "RESPONSE_STREAM" + ] + }, + "InvokeResponseStreamUpdate":{ + "type":"structure", + "members":{ + "Payload":{ + "shape":"Blob", + "eventpayload":true + } + }, + "event":true + }, + "InvokeWithResponseStreamCompleteEvent":{ + "type":"structure", + "members":{ + "ErrorCode":{"shape":"String"}, + "ErrorDetails":{"shape":"String"}, + "LogResult":{"shape":"String"} + }, + "event":true + }, + "InvokeWithResponseStreamRequest":{ + "type":"structure", + "required":["FunctionName"], + "members":{ + "FunctionName":{ + "shape":"NamespacedFunctionName", + "location":"uri", + "locationName":"FunctionName" + }, + "InvocationType":{ + "shape":"ResponseStreamingInvocationType", + "location":"header", + "locationName":"X-Amz-Invocation-Type" + }, + "LogType":{ + "shape":"LogType", + "location":"header", + "locationName":"X-Amz-Log-Type" + }, + "ClientContext":{ + "shape":"String", + "location":"header", + "locationName":"X-Amz-Client-Context" + }, + "Qualifier":{ + "shape":"Qualifier", + "location":"querystring", + "locationName":"Qualifier" + }, + "Payload":{"shape":"Blob"} + }, + "payload":"Payload" + }, + "InvokeWithResponseStreamResponse":{ + "type":"structure", + "members":{ + "StatusCode":{ + "shape":"Integer", + "location":"statusCode" + }, + "ExecutedVersion":{ + "shape":"Version", + "location":"header", + "locationName":"X-Amz-Executed-Version" + }, + 
"EventStream":{"shape":"InvokeWithResponseStreamResponseEvent"}, + "ResponseStreamContentType":{ + "shape":"String", + "location":"header", + "locationName":"Content-Type" + } + }, + "payload":"EventStream" + }, + "InvokeWithResponseStreamResponseEvent":{ + "type":"structure", + "members":{ + "PayloadChunk":{"shape":"InvokeResponseStreamUpdate"}, + "InvokeComplete":{"shape":"InvokeWithResponseStreamCompleteEvent"} + }, + "eventstream":true + }, "KMSAccessDeniedException":{ "type":"structure", "members":{ @@ -3674,6 +3803,13 @@ "error":{"httpStatusCode":502}, "exception":true }, + "ResponseStreamingInvocationType":{ + "type":"string", + "enum":[ + "RequestResponse", + "DryRun" + ] + }, "RoleArn":{ "type":"string", "pattern":"arn:(aws[a-zA-Z-]*)?:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+" @@ -4224,7 +4360,8 @@ "locationName":"Qualifier" }, "AuthType":{"shape":"FunctionUrlAuthType"}, - "Cors":{"shape":"Cors"} + "Cors":{"shape":"Cors"}, + "InvokeMode":{"shape":"InvokeMode"} } }, "UpdateFunctionUrlConfigResponse":{ @@ -4242,7 +4379,8 @@ "AuthType":{"shape":"FunctionUrlAuthType"}, "Cors":{"shape":"Cors"}, "CreationTime":{"shape":"Timestamp"}, - "LastModifiedTime":{"shape":"Timestamp"} + "LastModifiedTime":{"shape":"Timestamp"}, + "InvokeMode":{"shape":"InvokeMode"} } }, "UpdateRuntimeOn":{ diff --git a/models/apis/lambda/2015-03-31/docs-2.json b/models/apis/lambda/2015-03-31/docs-2.json index 131b1602c97..aaf738ca0de 100644 --- a/models/apis/lambda/2015-03-31/docs-2.json +++ b/models/apis/lambda/2015-03-31/docs-2.json @@ -6,7 +6,7 @@ "AddPermission": "Grants an Amazon Web Service, Amazon Web Services account, or Amazon Web Services organization permission to use a function. You can apply the policy at the function level, or specify a qualifier to restrict access to a single version or alias. If you use a qualifier, the invoker must use the full Amazon Resource Name (ARN) of that version or alias to invoke the function. Note: Lambda does not support adding policies to version $LATEST.
To grant permission to another account, specify the account ID as the Principal. To grant permission to an organization defined in Organizations, specify the organization ID as the PrincipalOrgID. For Amazon Web Services, the principal is a domain-style identifier that the service defines, such as s3.amazonaws.com or sns.amazonaws.com. For Amazon Web Services, you can also specify the ARN of the associated resource as the SourceArn. If you grant permission to a service principal without specifying the source, other accounts could potentially configure resources in their account to invoke your Lambda function.
This operation adds a statement to a resource-based permissions policy for the function. For more information about function policies, see Using resource-based policies for Lambda.
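As a rough illustration only (the function name, statement ID, and bucket ARN below are placeholders, not values from this release), granting the Amazon S3 service principal permission to invoke a function with this SDK might look like:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	svc := lambda.New(session.Must(session.NewSession()))

	// Allow the s3.amazonaws.com service principal to invoke the function,
	// but only for events coming from the named bucket (SourceArn).
	out, err := svc.AddPermission(&lambda.AddPermissionInput{
		FunctionName: aws.String("my-function"),             // placeholder
		StatementId:  aws.String("s3-invoke"),               // placeholder
		Action:       aws.String("lambda:InvokeFunction"),
		Principal:    aws.String("s3.amazonaws.com"),
		SourceArn:    aws.String("arn:aws:s3:::my-bucket"),  // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Statement))
}
```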
", "CreateAlias": "Creates an alias for a Lambda function version. Use aliases to provide clients with a function identifier that you can update to invoke a different version.
You can also map an alias to split invocation requests between two versions. Use the RoutingConfig parameter to specify a second version and the percentage of invocation requests that it receives.
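A minimal sketch of a weighted alias with this SDK; the function name, version numbers, and the 10% weight are illustrative assumptions:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	svc := lambda.New(session.Must(session.NewSession()))

	// Point the alias at version 1 and shift 10% of invocations to version 2.
	_, err := svc.CreateAlias(&lambda.CreateAliasInput{
		FunctionName:    aws.String("my-function"), // placeholder
		Name:            aws.String("live"),
		FunctionVersion: aws.String("1"),
		RoutingConfig: &lambda.AliasRoutingConfiguration{
			AdditionalVersionWeights: map[string]*float64{"2": aws.Float64(0.10)},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```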
Creates a code signing configuration. A code signing configuration defines a list of allowed signing profiles and defines the code-signing validation policy (action to be taken if deployment validation checks fail).
", - "CreateEventSourceMapping": "Creates a mapping between an event source and an Lambda function. Lambda reads items from the event source and invokes the function.
For details about how to configure different event sources, see the following topics.
The following error handling options are available only for stream sources (DynamoDB and Kinesis):
BisectBatchOnFunctionError
– If the function returns an error, split the batch in two and retry.
DestinationConfig
– Send discarded records to an Amazon SQS queue or Amazon SNS topic.
MaximumRecordAgeInSeconds
– Discard records older than the specified age. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires
MaximumRetryAttempts
– Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.
ParallelizationFactor
– Process multiple batches from each shard concurrently.
For information about which configuration parameters apply to each event source, see the following topics.
", + "CreateEventSourceMapping": "Creates a mapping between an event source and an Lambda function. Lambda reads items from the event source and invokes the function.
For details about how to configure different event sources, see the following topics.
The following error handling options are available only for stream sources (DynamoDB and Kinesis):
BisectBatchOnFunctionError
– If the function returns an error, split the batch in two and retry.
DestinationConfig
– Send discarded records to an Amazon SQS queue or Amazon SNS topic.
MaximumRecordAgeInSeconds
– Discard records older than the specified age. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.
MaximumRetryAttempts
– Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.
ParallelizationFactor
– Process multiple batches from each shard concurrently.
For information about which configuration parameters apply to each event source, see the following topics.
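To make the stream-specific error-handling options above concrete, here is a hedged sketch of creating a Kinesis mapping with this SDK; the stream ARN, queue ARN, and numeric values are placeholders, not recommendations:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	svc := lambda.New(session.Must(session.NewSession()))

	_, err := svc.CreateEventSourceMapping(&lambda.CreateEventSourceMappingInput{
		FunctionName:               aws.String("my-function"),                                        // placeholder
		EventSourceArn:             aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/demo"), // placeholder
		StartingPosition:           aws.String("LATEST"),
		BatchSize:                  aws.Int64(100),
		BisectBatchOnFunctionError: aws.Bool(true),  // split a failing batch in two and retry
		MaximumRetryAttempts:       aws.Int64(2),    // then discard the records
		MaximumRecordAgeInSeconds:  aws.Int64(3600), // drop records older than an hour
		ParallelizationFactor:      aws.Int64(2),    // two concurrent batches per shard
		DestinationConfig: &lambda.DestinationConfig{
			OnFailure: &lambda.OnFailure{
				Destination: aws.String("arn:aws:sqs:us-east-1:123456789012:dlq"), // placeholder
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```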
Creates a Lambda function. To create a function, you need a deployment package and an execution role. The deployment package is a .zip file archive or container image that contains your function code. The execution role grants the function permission to use Amazon Web Services, such as Amazon CloudWatch Logs for log streaming and X-Ray for request tracing.
If the deployment package is a container image, then you set the package type to Image. For a container image, the code property must include the URI of a container image in the Amazon ECR registry. You do not need to specify the handler and runtime properties.
If the deployment package is a .zip file archive, then you set the package type to Zip. For a .zip file archive, the code property specifies the location of the .zip file. You must also specify the handler and runtime properties. The code in the deployment package must be compatible with the target instruction set architecture of the function (x86-64 or arm64). If you do not specify the architecture, then the default value is x86-64.
When you create a function, Lambda provisions an instance of the function and its supporting resources. If your function connects to a VPC, this process can take a minute or so. During this time, you can't invoke or modify the function. The State, StateReason, and StateReasonCode fields in the response from GetFunctionConfiguration indicate when the function is ready to invoke. For more information, see Lambda function states.
A function has an unpublished version, and can have published versions and aliases. The unpublished version changes when you update your function's code and configuration. A published version is a snapshot of your function code and configuration that can't be changed. An alias is a named resource that maps to a version, and can be changed to map to a different version. Use the Publish parameter to create version 1 of your function from its initial configuration.
The other parameters let you configure version-specific and function-level settings. You can modify version-specific settings later with UpdateFunctionConfiguration. Function-level settings apply to both the unpublished and published versions of the function, and include tags (TagResource) and per-function concurrency limits (PutFunctionConcurrency).
You can use code signing if your deployment package is a .zip file archive. To enable code signing for this function, specify the ARN of a code-signing configuration. When a user attempts to deploy a code package with UpdateFunctionCode, Lambda checks that the code package has a valid signature from a trusted publisher. The code-signing configuration includes a set of signing profiles, which define the trusted publishers for this function.
If another Amazon Web Services account or an Amazon Web Service invokes your function, use AddPermission to grant permission by creating a resource-based Identity and Access Management (IAM) policy. You can grant permissions at the function level, on a version, or on an alias.
To invoke your function directly, use Invoke. To invoke your function in response to events in other Amazon Web Services, create an event source mapping (CreateEventSourceMapping), or configure a function trigger in the other service. For more information, see Invoking Lambda functions.
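A minimal sketch of creating a .zip-packaged function with this SDK; the bucket, key, role ARN, runtime, and handler are placeholder assumptions:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	svc := lambda.New(session.Must(session.NewSession()))

	// Zip package: handler and runtime are required; Publish creates version 1.
	_, err := svc.CreateFunction(&lambda.CreateFunctionInput{
		FunctionName:  aws.String("my-function"),                                // placeholder
		Role:          aws.String("arn:aws:iam::123456789012:role/lambda-role"), // placeholder
		Runtime:       aws.String("go1.x"),
		Handler:       aws.String("main"),
		Architectures: []*string{aws.String("arm64")},
		Code: &lambda.FunctionCode{
			S3Bucket: aws.String("my-bucket"),    // placeholder
			S3Key:    aws.String("function.zip"), // placeholder
		},
		Publish: aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```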
", "CreateFunctionUrlConfig": "Creates a Lambda function URL with the specified configuration parameters. A function URL is a dedicated HTTP(S) endpoint that you can use to invoke your function.
", "DeleteAlias": "Deletes a Lambda function alias.
", @@ -37,6 +37,7 @@ "GetRuntimeManagementConfig": "Retrieves the runtime management configuration for a function's version. If the runtime update mode is Manual, this includes the ARN of the runtime version and the runtime update mode. If the runtime update mode is Auto or Function update, this includes the runtime update mode and null
is returned for the ARN. For more information, see Runtime updates.
Invokes a Lambda function. You can invoke a function synchronously (and wait for the response), or asynchronously. To invoke a function asynchronously, set InvocationType to Event.
For synchronous invocation, details about the function response, including errors, are included in the response body and headers. For either invocation type, you can find more information in the execution log and trace.
When an error occurs, your function may be invoked multiple times. Retry behavior varies by error type, client, event source, and invocation type. For example, if you invoke a function asynchronously and it returns an error, Lambda executes the function up to two more times. For more information, see Error handling and automatic retries in Lambda.
For asynchronous invocation, Lambda adds events to a queue before sending them to your function. If your function does not have enough capacity to keep up with the queue, events may be lost. Occasionally, your function may receive the same event multiple times, even if no error occurs. To retain events that were not processed, configure your function with a dead-letter queue.
The status code in the API response doesn't reflect function errors. Error codes are reserved for errors that prevent your function from executing, such as permissions errors, quota errors, or issues with your function's code and configuration. For example, Lambda returns TooManyRequestsException if running the function would cause you to exceed a concurrency limit at either the account level (ConcurrentInvocationLimitExceeded) or function level (ReservedFunctionConcurrentInvocationLimitExceeded).
For functions with a long timeout, your client might disconnect during synchronous invocation while it waits for a response. Configure your HTTP client, SDK, firewall, proxy, or operating system to allow for long connections with timeout or keep-alive settings.
This operation requires permission for the lambda:InvokeFunction action. For details on how to set up permissions for cross-account invocations, see Granting function access to other accounts.
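A compact sketch of both invocation types with this SDK; the function name and payloads are placeholders. Per the note above, the HTTP status code does not reflect function errors, so the example checks FunctionError separately:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	svc := lambda.New(session.Must(session.NewSession()))

	// Synchronous invocation (RequestResponse is the default InvocationType).
	out, err := svc.Invoke(&lambda.InvokeInput{
		FunctionName: aws.String("my-function"), // placeholder
		Payload:      []byte(`{"hello":"world"}`),
	})
	if err != nil {
		log.Fatal(err) // request-level problem (permissions, throttling, and so on)
	}
	if out.FunctionError != nil {
		log.Fatalf("function error: %s", aws.StringValue(out.FunctionError))
	}
	fmt.Printf("status %d: %s\n", aws.Int64Value(out.StatusCode), out.Payload)

	// Asynchronous invocation: set InvocationType to Event; Lambda queues the event.
	_, err = svc.Invoke(&lambda.InvokeInput{
		FunctionName:   aws.String("my-function"),
		InvocationType: aws.String("Event"),
		Payload:        []byte(`{"hello":"async"}`),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```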
", "InvokeAsync": "For asynchronous function invocation, use Invoke.
Invokes a function asynchronously.
", + "InvokeWithResponseStream": "Configure your Lambda functions to stream response payloads back to clients. For more information, see Configuring a Lambda function to stream responses.
", "ListAliases": "Returns a list of aliases for a Lambda function.
", "ListCodeSigningConfigs": "Returns a list of code signing configurations. A request returns up to 10,000 configurations per call. You can use the MaxItems
parameter to return fewer configurations per call.
Lists event source mappings. Specify an EventSourceArn to show only event source mappings for a single event source.
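A short sketch of filtering mappings by event source with the paginated helper; the queue ARN is a placeholder:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	svc := lambda.New(session.Must(session.NewSession()))

	input := &lambda.ListEventSourceMappingsInput{
		EventSourceArn: aws.String("arn:aws:sqs:us-east-1:123456789012:my-queue"), // placeholder
	}
	err := svc.ListEventSourceMappingsPages(input,
		func(page *lambda.ListEventSourceMappingsOutput, lastPage bool) bool {
			for _, m := range page.EventSourceMappings {
				fmt.Println(aws.StringValue(m.UUID), aws.StringValue(m.State))
			}
			return true // keep paging
		})
	if err != nil {
		log.Fatal(err)
	}
}
```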
Removes tags from a function.
", "UpdateAlias": "Updates the configuration of a Lambda function alias.
", "UpdateCodeSigningConfig": "Update the code signing configuration. Changes to the code signing configuration take effect the next time a user tries to deploy a code package to the function.
", - "UpdateEventSourceMapping": "Updates an event source mapping. You can change the function that Lambda invokes, or pause invocation and resume later from the same location.
For details about how to configure different event sources, see the following topics.
The following error handling options are available only for stream sources (DynamoDB and Kinesis):
BisectBatchOnFunctionError
– If the function returns an error, split the batch in two and retry.
DestinationConfig
– Send discarded records to an Amazon SQS queue or Amazon SNS topic.
MaximumRecordAgeInSeconds
– Discard records older than the specified age. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires
MaximumRetryAttempts
– Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.
ParallelizationFactor
– Process multiple batches from each shard concurrently.
For information about which configuration parameters apply to each event source, see the following topics.
", + "UpdateEventSourceMapping": "Updates an event source mapping. You can change the function that Lambda invokes, or pause invocation and resume later from the same location.
For details about how to configure different event sources, see the following topics.
The following error handling options are available only for stream sources (DynamoDB and Kinesis):
BisectBatchOnFunctionError
– If the function returns an error, split the batch in two and retry.
DestinationConfig
– Send discarded records to an Amazon SQS queue or Amazon SNS topic.
MaximumRecordAgeInSeconds
– Discard records older than the specified age. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.
MaximumRetryAttempts
– Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.
ParallelizationFactor
– Process multiple batches from each shard concurrently.
For information about which configuration parameters apply to each event source, see the following topics.
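For example, pausing a mapping and tightening its retry policy might look roughly like this with this SDK (the UUID is a placeholder):

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	svc := lambda.New(session.Must(session.NewSession()))

	// Enabled=false pauses polling; Lambda resumes from the same position
	// when the mapping is re-enabled.
	_, err := svc.UpdateEventSourceMapping(&lambda.UpdateEventSourceMappingInput{
		UUID:                 aws.String("14e0db71-0000-0000-0000-example"), // placeholder
		Enabled:              aws.Bool(false),
		MaximumRetryAttempts: aws.Int64(1),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```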
Updates a Lambda function's code. If code signing is enabled for the function, the code package must be signed by a trusted publisher. For more information, see Configuring code signing for Lambda.
If the function's package type is Image, then you must specify the code package in ImageUri as the URI of a container image in the Amazon ECR registry.
If the function's package type is Zip, then you must specify the deployment package as a .zip file archive. Enter the Amazon S3 bucket and key of the code .zip file location. You can also provide the function code inline using the ZipFile field.
The code in the deployment package must be compatible with the target instruction set architecture of the function (x86-64 or arm64).
The function's code is locked when you publish a version. You can't modify the code of a published version, only the unpublished version.
For a function defined as a container image, Lambda resolves the image tag to an image digest. In Amazon ECR, if you update the image tag to a new image, Lambda does not automatically update the function.
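A minimal sketch of updating a .zip-packaged function from Amazon S3 and publishing the result; the bucket and key are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	svc := lambda.New(session.Must(session.NewSession()))

	// For an Image package type you would set ImageUri instead of S3Bucket/S3Key.
	_, err := svc.UpdateFunctionCode(&lambda.UpdateFunctionCodeInput{
		FunctionName: aws.String("my-function"),     // placeholder
		S3Bucket:     aws.String("my-bucket"),       // placeholder
		S3Key:        aws.String("function-v2.zip"), // placeholder
		Publish:      aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```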
Modify the version-specific settings of a Lambda function.
When you update a function, Lambda provisions an instance of the function and its supporting resources. If your function connects to a VPC, this process can take a minute. During this time, you can't modify the function, but you can still invoke it. The LastUpdateStatus, LastUpdateStatusReason, and LastUpdateStatusReasonCode fields in the response from GetFunctionConfiguration indicate when the update is complete and the function is processing events with the new configuration. For more information, see Lambda function states.
These settings can vary between versions of a function and are locked when you publish a version. You can't modify the configuration of a published version, only the unpublished version.
To configure function concurrency, use PutFunctionConcurrency. To grant invoke permissions to an Amazon Web Services account or Amazon Web Service, use AddPermission.
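A sketch of changing version-specific settings and then polling GetFunctionConfiguration until LastUpdateStatus reports the update is applied; the memory size and timeout are arbitrary example values:

```go
package main

import (
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	svc := lambda.New(session.Must(session.NewSession()))

	_, err := svc.UpdateFunctionConfiguration(&lambda.UpdateFunctionConfigurationInput{
		FunctionName: aws.String("my-function"), // placeholder
		MemorySize:   aws.Int64(512),
		Timeout:      aws.Int64(30),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Poll until the update has been applied (LastUpdateStatus leaves "InProgress").
	for {
		cfg, err := svc.GetFunctionConfiguration(&lambda.GetFunctionConfigurationInput{
			FunctionName: aws.String("my-function"),
		})
		if err != nil {
			log.Fatal(err)
		}
		if aws.StringValue(cfg.LastUpdateStatus) != "InProgress" {
			break
		}
		time.Sleep(2 * time.Second)
	}
}
```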
", "UpdateFunctionEventInvokeConfig": "Updates the configuration for asynchronous invocation for a function, version, or alias.
To configure options for asynchronous invocation, use PutFunctionEventInvokeConfig.
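A short sketch of adjusting asynchronous invocation settings for a function; the destination queue ARN is a placeholder:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	svc := lambda.New(session.Must(session.NewSession()))

	_, err := svc.UpdateFunctionEventInvokeConfig(&lambda.UpdateFunctionEventInvokeConfigInput{
		FunctionName:             aws.String("my-function"), // placeholder
		MaximumRetryAttempts:     aws.Int64(1),
		MaximumEventAgeInSeconds: aws.Int64(3600),
		DestinationConfig: &lambda.DestinationConfig{
			OnFailure: &lambda.OnFailure{
				Destination: aws.String("arn:aws:sqs:us-east-1:123456789012:async-dlq"), // placeholder
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```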
", @@ -203,30 +204,30 @@ "base": null, "refs": { "AddPermissionRequest$SourceArn": "For Amazon Web Services, the ARN of the Amazon Web Services resource that invokes the function. For example, an Amazon S3 bucket or Amazon SNS topic.
Note that Lambda configures the comparison using the StringLike operator.
The Amazon Resource Name (ARN) of the event source.
Amazon Kinesis – The ARN of the data stream or a stream consumer.
Amazon DynamoDB Streams – The ARN of the stream.
Amazon Simple Queue Service – The ARN of the queue.
Amazon Managed Streaming for Apache Kafka – The ARN of the cluster.
Amazon MQ – The ARN of the broker.
The Amazon Resource Name (ARN) of the event source.
Amazon Kinesis – The ARN of the data stream or a stream consumer.
Amazon DynamoDB Streams – The ARN of the stream.
Amazon Simple Queue Service – The ARN of the queue.
Amazon Managed Streaming for Apache Kafka – The ARN of the cluster.
Amazon MQ – The ARN of the broker.
Amazon DocumentDB – The ARN of the DocumentDB change stream.
The Amazon Resource Name (ARN) of the event source.
", "FunctionConfiguration$SigningProfileVersionArn": "The ARN of the signing profile version.
", "FunctionConfiguration$SigningJobArn": "The ARN of the signing job.
", "Layer$SigningProfileVersionArn": "The Amazon Resource Name (ARN) for a signing profile version.
", "Layer$SigningJobArn": "The Amazon Resource Name (ARN) of a signing job.
", - "ListEventSourceMappingsRequest$EventSourceArn": "The Amazon Resource Name (ARN) of the event source.
Amazon Kinesis – The ARN of the data stream or a stream consumer.
Amazon DynamoDB Streams – The ARN of the stream.
Amazon Simple Queue Service – The ARN of the queue.
Amazon Managed Streaming for Apache Kafka – The ARN of the cluster.
Amazon MQ – The ARN of the broker.
The Amazon Resource Name (ARN) of the event source.
Amazon Kinesis – The ARN of the data stream or a stream consumer.
Amazon DynamoDB Streams – The ARN of the stream.
Amazon Simple Queue Service – The ARN of the queue.
Amazon Managed Streaming for Apache Kafka – The ARN of the cluster.
Amazon MQ – The ARN of the broker.
Amazon DocumentDB – The ARN of the DocumentDB change stream.
The maximum number of records in each batch that Lambda pulls from your stream or queue and sends to your function. Lambda passes all of the records in the batch to the function in a single call, up to the payload limit for synchronous invocation (6 MB).
Amazon Kinesis – Default 100. Max 10,000.
Amazon DynamoDB Streams – Default 100. Max 10,000.
Amazon Simple Queue Service – Default 10. For standard queues the max is 10,000. For FIFO queues the max is 10.
Amazon Managed Streaming for Apache Kafka – Default 100. Max 10,000.
Self-managed Apache Kafka – Default 100. Max 10,000.
Amazon MQ (ActiveMQ and RabbitMQ) – Default 100. Max 10,000.
The maximum number of records in each batch that Lambda pulls from your stream or queue and sends to your function. Lambda passes all of the records in the batch to the function in a single call, up to the payload limit for synchronous invocation (6 MB).
Amazon Kinesis – Default 100. Max 10,000.
Amazon DynamoDB Streams – Default 100. Max 10,000.
Amazon Simple Queue Service – Default 10. For standard queues the max is 10,000. For FIFO queues the max is 10.
Amazon Managed Streaming for Apache Kafka – Default 100. Max 10,000.
Self-managed Apache Kafka – Default 100. Max 10,000.
Amazon MQ (ActiveMQ and RabbitMQ) – Default 100. Max 10,000.
DocumentDB – Default 100. Max 10,000.
The maximum number of records in each batch that Lambda pulls from your stream or queue and sends to your function. Lambda passes all of the records in the batch to the function in a single call, up to the payload limit for synchronous invocation (6 MB).
Default value: Varies by service. For Amazon SQS, the default is 10. For all other services, the default is 100.
Related setting: When you set BatchSize to a value greater than 10, you must set MaximumBatchingWindowInSeconds to at least 1.
The maximum number of records in each batch that Lambda pulls from your stream or queue and sends to your function. Lambda passes all of the records in the batch to the function in a single call, up to the payload limit for synchronous invocation (6 MB).
Amazon Kinesis – Default 100. Max 10,000.
Amazon DynamoDB Streams – Default 100. Max 10,000.
Amazon Simple Queue Service – Default 10. For standard queues the max is 10,000. For FIFO queues the max is 10.
Amazon Managed Streaming for Apache Kafka – Default 100. Max 10,000.
Self-managed Apache Kafka – Default 100. Max 10,000.
Amazon MQ (ActiveMQ and RabbitMQ) – Default 100. Max 10,000.
The maximum number of records in each batch that Lambda pulls from your stream or queue and sends to your function. Lambda passes all of the records in the batch to the function in a single call, up to the payload limit for synchronous invocation (6 MB).
Amazon Kinesis – Default 100. Max 10,000.
Amazon DynamoDB Streams – Default 100. Max 10,000.
Amazon Simple Queue Service – Default 10. For standard queues the max is 10,000. For FIFO queues the max is 10.
Amazon Managed Streaming for Apache Kafka – Default 100. Max 10,000.
Self-managed Apache Kafka – Default 100. Max 10,000.
Amazon MQ (ActiveMQ and RabbitMQ) – Default 100. Max 10,000.
DocumentDB – Default 100. Max 10,000.
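The batch-size and batching-window relationship described above can be sketched with CreateEventSourceMapping, reusing the same client and imports as the earlier examples; the stream ARN and function name are placeholders.

```go
mapping, err := svc.CreateEventSourceMapping(&lambda.CreateEventSourceMappingInput{
	FunctionName:     aws.String("my-function"),                                             // placeholder
	EventSourceArn:   aws.String("arn:aws:kinesis:us-west-2:123456789012:stream/my-stream"), // placeholder
	StartingPosition: aws.String(lambda.EventSourcePositionLatest),
	BatchSize:        aws.Int64(500),
	// Because BatchSize is greater than 10, a batching window of at least 1 second is required.
	MaximumBatchingWindowInSeconds: aws.Int64(5),
})
if err != nil {
	log.Fatal(err)
}
fmt.Println("mapping UUID:", aws.StringValue(mapping.UUID))
```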
(Streams only) If the function returns an error, split the batch in two and retry.
", - "EventSourceMappingConfiguration$BisectBatchOnFunctionError": "(Streams only) If the function returns an error, split the batch in two and retry. The default value is false.
", - "UpdateEventSourceMappingRequest$BisectBatchOnFunctionError": "(Streams only) If the function returns an error, split the batch in two and retry.
" + "CreateEventSourceMappingRequest$BisectBatchOnFunctionError": "(Kinesis and DynamoDB Streams only) If the function returns an error, split the batch in two and retry.
", + "EventSourceMappingConfiguration$BisectBatchOnFunctionError": "(Kinesis and DynamoDB Streams only) If the function returns an error, split the batch in two and retry. The default value is false.
", + "UpdateEventSourceMappingRequest$BisectBatchOnFunctionError": "(Kinesis and DynamoDB Streams only) If the function returns an error, split the batch in two and retry.
" } }, "Blob": { @@ -235,6 +236,8 @@ "FunctionCode$ZipFile": "The base64-encoded contents of the deployment package. Amazon Web Services SDK and CLI clients handle the encoding for you.
", "InvocationRequest$Payload": "The JSON that you want to provide to your Lambda function as input.
You can enter the JSON directly. For example, --payload '{ \"key\": \"value\" }'
. You can also specify a file path. For example, --payload file://payload.json
.
The response from the function, or an error object.
", + "InvokeResponseStreamUpdate$Payload": "Data returned by your Lambda function.
", + "InvokeWithResponseStreamRequest$Payload": "The JSON that you want to provide to your Lambda function as input.
You can enter the JSON directly. For example, --payload '{ \"key\": \"value\" }'
. You can also specify a file path. For example, --payload file://payload.json
.
The base64-encoded contents of the layer archive. Amazon Web Services SDK and Amazon Web Services CLI clients handle the encoding for you.
", "UpdateFunctionCodeRequest$ZipFile": "The base64-encoded contents of the deployment package. Amazon Web Services SDK and CLI clients handle the encoding for you. Use only with a function defined with a .zip file archive deployment package.
" } @@ -500,12 +503,12 @@ "DestinationConfig": { "base": "A configuration object that specifies the destination of an event after Lambda processes it.
", "refs": { - "CreateEventSourceMappingRequest$DestinationConfig": "(Streams only) An Amazon SQS queue or Amazon SNS topic destination for discarded records.
", - "EventSourceMappingConfiguration$DestinationConfig": "(Streams only) An Amazon SQS queue or Amazon SNS topic destination for discarded records.
", - "FunctionEventInvokeConfig$DestinationConfig": "A destination for events after they have been sent to a function for processing.
Destinations
Function - The Amazon Resource Name (ARN) of a Lambda function.
Queue - The ARN of an SQS queue.
Topic - The ARN of an SNS topic.
Event Bus - The ARN of an Amazon EventBridge event bus.
A destination for events after they have been sent to a function for processing.
Destinations
Function - The Amazon Resource Name (ARN) of a Lambda function.
Queue - The ARN of an SQS queue.
Topic - The ARN of an SNS topic.
Event Bus - The ARN of an Amazon EventBridge event bus.
(Streams only) An Amazon SQS queue or Amazon SNS topic destination for discarded records.
", - "UpdateFunctionEventInvokeConfigRequest$DestinationConfig": "A destination for events after they have been sent to a function for processing.
Destinations
Function - The Amazon Resource Name (ARN) of a Lambda function.
Queue - The ARN of an SQS queue.
Topic - The ARN of an SNS topic.
Event Bus - The ARN of an Amazon EventBridge event bus.
(Kinesis and DynamoDB Streams only) A standard Amazon SQS queue or standard Amazon SNS topic destination for discarded records.
", + "EventSourceMappingConfiguration$DestinationConfig": "(Kinesis and DynamoDB Streams only) An Amazon SQS queue or Amazon SNS topic destination for discarded records.
", + "FunctionEventInvokeConfig$DestinationConfig": "A destination for events after they have been sent to a function for processing.
Destinations
Function - The Amazon Resource Name (ARN) of a Lambda function.
Queue - The ARN of a standard SQS queue.
Topic - The ARN of a standard SNS topic.
Event Bus - The ARN of an Amazon EventBridge event bus.
A destination for events after they have been sent to a function for processing.
Destinations
Function - The Amazon Resource Name (ARN) of a Lambda function.
Queue - The ARN of a standard SQS queue.
Topic - The ARN of a standard SNS topic.
Event Bus - The ARN of an Amazon EventBridge event bus.
(Kinesis and DynamoDB Streams only) A standard Amazon SQS queue or standard Amazon SNS topic destination for discarded records.
", + "UpdateFunctionEventInvokeConfigRequest$DestinationConfig": "A destination for events after they have been sent to a function for processing.
Destinations
Function - The Amazon Resource Name (ARN) of a Lambda function.
Queue - The ARN of a standard SQS queue.
Topic - The ARN of a standard SNS topic.
Event Bus - The ARN of an Amazon EventBridge event bus.
The position in a stream from which to start reading. Required for Amazon Kinesis, Amazon DynamoDB, and Amazon MSK Streams sources. AT_TIMESTAMP is supported only for Amazon Kinesis streams.
The position in a stream from which to start reading. Required for Amazon Kinesis, Amazon DynamoDB, and Amazon MSK stream sources. AT_TIMESTAMP is supported only for Amazon Kinesis streams.
The position in a stream from which to start reading. Required for Amazon Kinesis, Amazon DynamoDB, and Amazon MSK Streams sources. AT_TIMESTAMP is supported only for Amazon Kinesis streams and Amazon DocumentDB.
The position in a stream from which to start reading. Required for Amazon Kinesis, Amazon DynamoDB, and Amazon MSK stream sources. AT_TIMESTAMP is supported only for Amazon Kinesis streams and Amazon DocumentDB.
(Streams and Amazon SQS) A list of current response type enums applied to the event source mapping.
", - "EventSourceMappingConfiguration$FunctionResponseTypes": "(Streams and Amazon SQS) A list of current response type enums applied to the event source mapping.
", - "UpdateEventSourceMappingRequest$FunctionResponseTypes": "(Streams and Amazon SQS) A list of current response type enums applied to the event source mapping.
" + "CreateEventSourceMappingRequest$FunctionResponseTypes": "(Kinesis, DynamoDB Streams, and Amazon SQS) A list of current response type enums applied to the event source mapping.
", + "EventSourceMappingConfiguration$FunctionResponseTypes": "(Kinesis, DynamoDB Streams, and Amazon SQS) A list of current response type enums applied to the event source mapping.
", + "UpdateEventSourceMappingRequest$FunctionResponseTypes": "(Kinesis, DynamoDB Streams, and Amazon SQS) A list of current response type enums applied to the event source mapping.
" } }, "FunctionUrl": { @@ -1064,7 +1067,8 @@ "base": null, "refs": { "AccountLimit$ConcurrentExecutions": "The maximum number of simultaneous function executions.
", - "InvocationResponse$StatusCode": "The HTTP status code is in the 200 range for a successful request. For the RequestResponse
invocation type, this status code is 200. For the Event
invocation type, this status code is 202. For the DryRun
invocation type, the status code is 204.
The HTTP status code is in the 200 range for a successful request. For the RequestResponse
invocation type, this status code is 200. For the Event
invocation type, this status code is 202. For the DryRun
invocation type, the status code is 204.
For a successful request, the HTTP status code is in the 200 range. For the RequestResponse
invocation type, this status code is 200. For the DryRun
invocation type, this status code is 204.
Use one of the following options:
BUFFERED
– This is the default option. Lambda invokes your function using the Invoke
API operation. Invocation results are available when the payload is complete. The maximum payload size is 6 MB.
RESPONSE_STREAM
– Your function streams payload results as they become available. Lambda invokes your function using the InvokeWithResponseStream
API operation. The maximum response payload size is 20 MB, however, you can request a quota increase.
Use one of the following options:
BUFFERED
– This is the default option. Lambda invokes your function using the Invoke
API operation. Invocation results are available when the payload is complete. The maximum payload size is 6 MB.
RESPONSE_STREAM
– Your function streams payload results as they become available. Lambda invokes your function using the InvokeWithResponseStream
API operation. The maximum response payload size is 20 MB, however, you can request a quota increase.
Use one of the following options:
BUFFERED
– This is the default option. Lambda invokes your function using the Invoke
API operation. Invocation results are available when the payload is complete. The maximum payload size is 6 MB.
RESPONSE_STREAM
– Your function streams payload results as they become available. Lambda invokes your function using the InvokeWithResponseStream
API operation. The maximum response payload size is 20 MB, however, you can request a quota increase.
Use one of the following options:
BUFFERED
– This is the default option. Lambda invokes your function using the Invoke
API operation. Invocation results are available when the payload is complete. The maximum payload size is 6 MB.
RESPONSE_STREAM
– Your function streams payload results as they become available. Lambda invokes your function using the InvokeWithResponseStream
API operation. The maximum response payload size is 20 MB, however, you can request a quota increase.
Use one of the following options:
BUFFERED
– This is the default option. Lambda invokes your function using the Invoke
API operation. Invocation results are available when the payload is complete. The maximum payload size is 6 MB.
RESPONSE_STREAM
– Your function streams payload results as they become available. Lambda invokes your function using the InvokeWithResponseStream
API operation. The maximum response payload size is 20 MB, however, you can request a quota increase.
Use one of the following options:
BUFFERED
– This is the default option. Lambda invokes your function using the Invoke
API operation. Invocation results are available when the payload is complete. The maximum payload size is 6 MB.
RESPONSE_STREAM
– Your function streams payload results as they become available. Lambda invokes your function using the InvokeWithResponseStream
API operation. The maximum response payload size is 20 MB, however, you can request a quota increase.
A chunk of the streamed response payload.
", + "refs": { + "InvokeWithResponseStreamResponseEvent$PayloadChunk": "A chunk of the streamed response payload.
" + } + }, + "InvokeWithResponseStreamCompleteEvent": { + "base": "A response confirming that the event stream is complete.
", + "refs": { + "InvokeWithResponseStreamResponseEvent$InvokeComplete": "An object that's returned when the stream has ended and all the payload chunks have been returned.
" + } + }, + "InvokeWithResponseStreamRequest": { + "base": null, + "refs": { + } + }, + "InvokeWithResponseStreamResponse": { + "base": null, + "refs": { + } + }, + "InvokeWithResponseStreamResponseEvent": { + "base": "An object that includes a chunk of the response payload. When the stream has ended, Lambda includes a InvokeComplete
object.
The stream of response payloads.
" + } + }, "KMSAccessDeniedException": { "base": "Lambda couldn't decrypt the environment variables because KMS access was denied. Check the Lambda function's KMS permissions.
", "refs": { @@ -1426,7 +1469,8 @@ "LogType": { "base": null, "refs": { - "InvocationRequest$LogType": "Set to Tail
to include the execution log in the response. Applies to synchronously invoked functions only.
Set to Tail
to include the execution log in the response. Applies to synchronously invoked functions only.
Set to Tail
to include the execution log in the response. Applies to synchronously invoked functions only.
The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function. You can configure MaximumBatchingWindowInSeconds
to any value from 0 seconds to 300 seconds in increments of seconds.
For streams and Amazon SQS event sources, the default batching window is 0 seconds. For Amazon MSK, Self-managed Apache Kafka, and Amazon MQ event sources, the default batching window is 500 ms. Note that because you can only change MaximumBatchingWindowInSeconds
in increments of seconds, you cannot revert back to the 500 ms default batching window after you have changed it. To restore the default batching window, you must create a new event source mapping.
Related setting: For streams and Amazon SQS event sources, when you set BatchSize
to a value greater than 10, you must set MaximumBatchingWindowInSeconds
to at least 1.
The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function. You can configure MaximumBatchingWindowInSeconds
to any value from 0 seconds to 300 seconds in increments of seconds.
For streams and Amazon SQS event sources, the default batching window is 0 seconds. For Amazon MSK, Self-managed Apache Kafka, and Amazon MQ event sources, the default batching window is 500 ms. Note that because you can only change MaximumBatchingWindowInSeconds
in increments of seconds, you cannot revert back to the 500 ms default batching window after you have changed it. To restore the default batching window, you must create a new event source mapping.
Related setting: For streams and Amazon SQS event sources, when you set BatchSize
to a value greater than 10, you must set MaximumBatchingWindowInSeconds
to at least 1.
The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function. You can configure MaximumBatchingWindowInSeconds
to any value from 0 seconds to 300 seconds in increments of seconds.
For streams and Amazon SQS event sources, the default batching window is 0 seconds. For Amazon MSK, Self-managed Apache Kafka, and Amazon MQ event sources, the default batching window is 500 ms. Note that because you can only change MaximumBatchingWindowInSeconds
in increments of seconds, you cannot revert back to the 500 ms default batching window after you have changed it. To restore the default batching window, you must create a new event source mapping.
Related setting: For streams and Amazon SQS event sources, when you set BatchSize
to a value greater than 10, you must set MaximumBatchingWindowInSeconds
to at least 1.
The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function. You can configure MaximumBatchingWindowInSeconds
to any value from 0 seconds to 300 seconds in increments of seconds.
For streams and Amazon SQS event sources, the default batching window is 0 seconds. For Amazon MSK, Self-managed Apache Kafka, Amazon MQ, and DocumentDB event sources, the default batching window is 500 ms. Note that because you can only change MaximumBatchingWindowInSeconds
in increments of seconds, you cannot revert back to the 500 ms default batching window after you have changed it. To restore the default batching window, you must create a new event source mapping.
Related setting: For streams and Amazon SQS event sources, when you set BatchSize
to a value greater than 10, you must set MaximumBatchingWindowInSeconds
to at least 1.
The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function. You can configure MaximumBatchingWindowInSeconds
to any value from 0 seconds to 300 seconds in increments of seconds.
For streams and Amazon SQS event sources, the default batching window is 0 seconds. For Amazon MSK, Self-managed Apache Kafka, Amazon MQ, and DocumentDB event sources, the default batching window is 500 ms. Note that because you can only change MaximumBatchingWindowInSeconds
in increments of seconds, you cannot revert back to the 500 ms default batching window after you have changed it. To restore the default batching window, you must create a new event source mapping.
Related setting: For streams and Amazon SQS event sources, when you set BatchSize
to a value greater than 10, you must set MaximumBatchingWindowInSeconds
to at least 1.
The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function. You can configure MaximumBatchingWindowInSeconds
to any value from 0 seconds to 300 seconds in increments of seconds.
For streams and Amazon SQS event sources, the default batching window is 0 seconds. For Amazon MSK, Self-managed Apache Kafka, Amazon MQ, and DocumentDB event sources, the default batching window is 500 ms. Note that because you can only change MaximumBatchingWindowInSeconds
in increments of seconds, you cannot revert back to the 500 ms default batching window after you have changed it. To restore the default batching window, you must create a new event source mapping.
Related setting: For streams and Amazon SQS event sources, when you set BatchSize
to a value greater than 10, you must set MaximumBatchingWindowInSeconds
to at least 1.
(Streams only) Discard records older than the specified age. The default value is infinite (-1).
", - "EventSourceMappingConfiguration$MaximumRecordAgeInSeconds": "(Streams only) Discard records older than the specified age. The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, Lambda never discards old records.
", - "UpdateEventSourceMappingRequest$MaximumRecordAgeInSeconds": "(Streams only) Discard records older than the specified age. The default value is infinite (-1).
" + "CreateEventSourceMappingRequest$MaximumRecordAgeInSeconds": "(Kinesis and DynamoDB Streams only) Discard records older than the specified age. The default value is infinite (-1).
", + "EventSourceMappingConfiguration$MaximumRecordAgeInSeconds": "(Kinesis and DynamoDB Streams only) Discard records older than the specified age. The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, Lambda never discards old records.
", + "UpdateEventSourceMappingRequest$MaximumRecordAgeInSeconds": "(Kinesis and DynamoDB Streams only) Discard records older than the specified age. The default value is infinite (-1).
" } }, "MaximumRetryAttempts": { @@ -1531,9 +1575,9 @@ "MaximumRetryAttemptsEventSourceMapping": { "base": null, "refs": { - "CreateEventSourceMappingRequest$MaximumRetryAttempts": "(Streams only) Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.
", - "EventSourceMappingConfiguration$MaximumRetryAttempts": "(Streams only) Discard records after the specified number of retries. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, Lambda retries failed records until the record expires in the event source.
", - "UpdateEventSourceMappingRequest$MaximumRetryAttempts": "(Streams only) Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.
" + "CreateEventSourceMappingRequest$MaximumRetryAttempts": "(Kinesis and DynamoDB Streams only) Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.
", + "EventSourceMappingConfiguration$MaximumRetryAttempts": "(Kinesis and DynamoDB Streams only) Discard records after the specified number of retries. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, Lambda retries failed records until the record expires in the event source.
", + "UpdateEventSourceMappingRequest$MaximumRetryAttempts": "(Kinesis and DynamoDB Streams only) Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.
" } }, "MemorySize": { @@ -1567,6 +1611,7 @@ "GetRuntimeManagementConfigRequest$FunctionName": "The name of the Lambda function.
Name formats
Function name – my-function
.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function
.
Partial ARN – 123456789012:function:my-function
.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", "InvocationRequest$FunctionName": "The name of the Lambda function, version, or alias.
Name formats
Function name – my-function
(name-only), my-function:v1
(with alias).
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function
.
Partial ARN – 123456789012:function:my-function
.
You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", "InvokeAsyncRequest$FunctionName": "The name of the Lambda function.
Name formats
Function name – my-function
.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function
.
Partial ARN – 123456789012:function:my-function
.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", + "InvokeWithResponseStreamRequest$FunctionName": "The name of the Lambda function.
Name formats
Function name – my-function
.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function
.
Partial ARN – 123456789012:function:my-function
.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", "ListVersionsByFunctionRequest$FunctionName": "The name of the Lambda function.
Name formats
Function name - MyFunction
.
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction
.
Partial ARN - 123456789012:function:MyFunction
.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
" } }, @@ -1621,9 +1666,9 @@ "ParallelizationFactor": { "base": null, "refs": { - "CreateEventSourceMappingRequest$ParallelizationFactor": "(Streams only) The number of batches to process from each shard concurrently.
", - "EventSourceMappingConfiguration$ParallelizationFactor": "(Streams only) The number of batches to process concurrently from each shard. The default value is 1.
", - "UpdateEventSourceMappingRequest$ParallelizationFactor": "(Streams only) The number of batches to process from each shard concurrently.
" + "CreateEventSourceMappingRequest$ParallelizationFactor": "(Kinesis and DynamoDB Streams only) The number of batches to process from each shard concurrently.
", + "EventSourceMappingConfiguration$ParallelizationFactor": "(Kinesis and DynamoDB Streams only) The number of batches to process concurrently from each shard. The default value is 1.
", + "UpdateEventSourceMappingRequest$ParallelizationFactor": "(Kinesis and DynamoDB Streams only) The number of batches to process from each shard concurrently.
" } }, "Pattern": { @@ -1757,6 +1802,7 @@ "GetProvisionedConcurrencyConfigRequest$Qualifier": "The version number or alias name.
", "GetRuntimeManagementConfigRequest$Qualifier": "Specify a version of the function. This can be $LATEST
or a published version number. If no value is specified, the configuration for the $LATEST
version is returned.
Specify a version or alias to invoke a published version of the function.
", + "InvokeWithResponseStreamRequest$Qualifier": "The alias name.
", "PutFunctionEventInvokeConfigRequest$Qualifier": "A version number or alias name.
", "PutProvisionedConcurrencyConfigRequest$Qualifier": "The version number or alias name.
", "PutRuntimeManagementConfigRequest$Qualifier": "Specify a version of the function. This can be $LATEST
or a published version number. If no value is specified, the configuration for the $LATEST
version is returned.
Use one of the following options:
RequestResponse
(default) – Invoke the function synchronously. Keep the connection open until the function returns a response or times out. The API operation response includes the function response and additional data.
DryRun
– Validate parameter values and verify that the IAM user or role has permission to invoke the function.
Up to 3,583 bytes of base64-encoded data about the invoking client to pass to the function in the context object.
", "InvocationResponse$FunctionError": "If present, indicates that an error occurred during function execution. Details about the error are included in the response payload.
", "InvocationResponse$LogResult": "The last 4 KB of the execution log, which is base64-encoded.
", + "InvokeWithResponseStreamCompleteEvent$ErrorCode": "An error code.
", + "InvokeWithResponseStreamCompleteEvent$ErrorDetails": "The details of any returned error.
", + "InvokeWithResponseStreamCompleteEvent$LogResult": "The last 4 KB of the execution log, which is base64-encoded.
", + "InvokeWithResponseStreamRequest$ClientContext": "Up to 3,583 bytes of base64-encoded data about the invoking client to pass to the function in the context object.
", + "InvokeWithResponseStreamResponse$ResponseStreamContentType": "The type of data the stream is returning.
", "KMSAccessDeniedException$Type": null, "KMSAccessDeniedException$Message": null, "KMSDisabledException$Type": null, @@ -2319,9 +2376,9 @@ "TumblingWindowInSeconds": { "base": null, "refs": { - "CreateEventSourceMappingRequest$TumblingWindowInSeconds": "(Streams only) The duration in seconds of a processing window. The range is between 1 second and 900 seconds.
", - "EventSourceMappingConfiguration$TumblingWindowInSeconds": "(Streams only) The duration in seconds of a processing window. The range is 1–900 seconds.
", - "UpdateEventSourceMappingRequest$TumblingWindowInSeconds": "(Streams only) The duration in seconds of a processing window. The range is between 1 second and 900 seconds.
" + "CreateEventSourceMappingRequest$TumblingWindowInSeconds": "(Kinesis and DynamoDB Streams only) The duration in seconds of a processing window for DynamoDB and Kinesis Streams event sources. A value of 0 seconds indicates no tumbling window.
", + "EventSourceMappingConfiguration$TumblingWindowInSeconds": "(Kinesis and DynamoDB Streams only) The duration in seconds of a processing window for DynamoDB and Kinesis Streams event sources. A value of 0 seconds indicates no tumbling window.
", + "UpdateEventSourceMappingRequest$TumblingWindowInSeconds": "(Kinesis and DynamoDB Streams only) The duration in seconds of a processing window for DynamoDB and Kinesis Streams event sources. A value of 0 seconds indicates no tumbling window.
" } }, "URI": { @@ -2408,6 +2465,7 @@ "CreateAliasRequest$FunctionVersion": "The function version that the alias invokes.
", "FunctionConfiguration$Version": "The version of the Lambda function.
", "InvocationResponse$ExecutedVersion": "The version of the function that executed. When you invoke a function with an alias, this indicates which version the alias resolved to.
", + "InvokeWithResponseStreamResponse$ExecutedVersion": "The version of the function that executed. When you invoke a function with an alias, this indicates which version the alias resolved to.
", "ListAliasesRequest$FunctionVersion": "Specify a function version to only list aliases that invoke that version.
", "UpdateAliasRequest$FunctionVersion": "The function version that the alias invokes.
" } diff --git a/models/apis/lambda/2015-03-31/endpoint-tests-1.json b/models/apis/lambda/2015-03-31/endpoint-tests-1.json index 06ce0daa036..09d226188e5 100644 --- a/models/apis/lambda/2015-03-31/endpoint-tests-1.json +++ b/models/apis/lambda/2015-03-31/endpoint-tests-1.json @@ -8,9 +8,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "af-south-1" + "Region": "af-south-1", + "UseDualStack": false } }, { @@ -21,9 +21,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "af-south-1" + "Region": "af-south-1", + "UseDualStack": true } }, { @@ -34,9 +34,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "ap-east-1" + "Region": "ap-east-1", + "UseDualStack": false } }, { @@ -47,9 +47,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "ap-east-1" + "Region": "ap-east-1", + "UseDualStack": true } }, { @@ -60,9 +60,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "ap-northeast-1" + "Region": "ap-northeast-1", + "UseDualStack": false } }, { @@ -73,9 +73,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "ap-northeast-1" + "Region": "ap-northeast-1", + "UseDualStack": true } }, { @@ -86,9 +86,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "ap-northeast-2" + "Region": "ap-northeast-2", + "UseDualStack": false } }, { @@ -99,9 +99,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "ap-northeast-2" + "Region": "ap-northeast-2", + "UseDualStack": true } }, { @@ -112,9 +112,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "ap-northeast-3" + "Region": "ap-northeast-3", + "UseDualStack": false } }, { @@ -125,9 +125,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "ap-northeast-3" + "Region": "ap-northeast-3", + "UseDualStack": true } }, { @@ -138,9 +138,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "ap-south-1" + "Region": "ap-south-1", + "UseDualStack": false } }, { @@ -151,9 +151,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "ap-south-1" + "Region": "ap-south-1", + "UseDualStack": true } }, { @@ -164,9 +164,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "ap-southeast-1" + "Region": "ap-southeast-1", + "UseDualStack": false } }, { @@ -177,9 +177,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "ap-southeast-1" + "Region": "ap-southeast-1", + "UseDualStack": true } }, { @@ -190,9 +190,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "ap-southeast-2" + "Region": "ap-southeast-2", + "UseDualStack": false } }, { @@ -203,9 +203,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "ap-southeast-2" + "Region": "ap-southeast-2", + "UseDualStack": true } }, { @@ -216,9 +216,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "ap-southeast-3" + "Region": "ap-southeast-3", + "UseDualStack": false } }, { @@ -229,9 +229,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "ap-southeast-3" + "Region": "ap-southeast-3", + "UseDualStack": true } }, { @@ -242,9 +242,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "ca-central-1" + "Region": "ca-central-1", + "UseDualStack": false } }, { @@ -255,9 +255,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "ca-central-1" + "Region": "ca-central-1", + 
"UseDualStack": true } }, { @@ -268,9 +268,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "eu-central-1" + "Region": "eu-central-1", + "UseDualStack": false } }, { @@ -281,9 +281,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "eu-central-1" + "Region": "eu-central-1", + "UseDualStack": true } }, { @@ -294,9 +294,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "eu-north-1" + "Region": "eu-north-1", + "UseDualStack": false } }, { @@ -307,9 +307,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "eu-north-1" + "Region": "eu-north-1", + "UseDualStack": true } }, { @@ -320,9 +320,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "eu-south-1" + "Region": "eu-south-1", + "UseDualStack": false } }, { @@ -333,9 +333,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "eu-south-1" + "Region": "eu-south-1", + "UseDualStack": true } }, { @@ -346,9 +346,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "eu-west-1" + "Region": "eu-west-1", + "UseDualStack": false } }, { @@ -359,9 +359,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "eu-west-1" + "Region": "eu-west-1", + "UseDualStack": true } }, { @@ -372,9 +372,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "eu-west-2" + "Region": "eu-west-2", + "UseDualStack": false } }, { @@ -385,9 +385,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "eu-west-2" + "Region": "eu-west-2", + "UseDualStack": true } }, { @@ -398,9 +398,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "eu-west-3" + "Region": "eu-west-3", + "UseDualStack": false } }, { @@ -411,9 +411,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "eu-west-3" + "Region": "eu-west-3", + "UseDualStack": true } }, { @@ -424,9 +424,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "me-south-1" + "Region": "me-south-1", + "UseDualStack": false } }, { @@ -437,9 +437,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "me-south-1" + "Region": "me-south-1", + "UseDualStack": true } }, { @@ -450,9 +450,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "sa-east-1" + "Region": "sa-east-1", + "UseDualStack": false } }, { @@ -463,9 +463,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "sa-east-1" + "Region": "sa-east-1", + "UseDualStack": true } }, { @@ -476,9 +476,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "us-east-1" + "Region": "us-east-1", + "UseDualStack": false } }, { @@ -489,9 +489,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": true, - "Region": "us-east-1" + "Region": "us-east-1", + "UseDualStack": false } }, { @@ -502,9 +502,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "us-east-1" + "Region": "us-east-1", + "UseDualStack": true } }, { @@ -515,9 +515,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "us-east-2" + "Region": "us-east-2", + "UseDualStack": false } }, { @@ -528,9 +528,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": true, - "Region": "us-east-2" + "Region": "us-east-2", + "UseDualStack": false } }, { @@ -541,9 +541,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "us-east-2" + "Region": "us-east-2", + "UseDualStack": true } }, { @@ 
-554,9 +554,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "us-west-1" + "Region": "us-west-1", + "UseDualStack": false } }, { @@ -567,9 +567,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": true, - "Region": "us-west-1" + "Region": "us-west-1", + "UseDualStack": false } }, { @@ -580,9 +580,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "us-west-1" + "Region": "us-west-1", + "UseDualStack": true } }, { @@ -593,9 +593,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "us-west-2" + "Region": "us-west-2", + "UseDualStack": false } }, { @@ -606,9 +606,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": true, - "Region": "us-west-2" + "Region": "us-west-2", + "UseDualStack": false } }, { @@ -619,9 +619,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "us-west-2" + "Region": "us-west-2", + "UseDualStack": true } }, { @@ -632,9 +632,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": true, - "Region": "us-east-1" + "Region": "us-east-1", + "UseDualStack": true } }, { @@ -645,9 +645,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "cn-north-1" + "Region": "cn-north-1", + "UseDualStack": false } }, { @@ -658,9 +658,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "cn-north-1" + "Region": "cn-north-1", + "UseDualStack": true } }, { @@ -671,9 +671,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "cn-northwest-1" + "Region": "cn-northwest-1", + "UseDualStack": false } }, { @@ -684,9 +684,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "cn-northwest-1" + "Region": "cn-northwest-1", + "UseDualStack": true } }, { @@ -697,9 +697,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": true, - "Region": "cn-north-1" + "Region": "cn-north-1", + "UseDualStack": true } }, { @@ -710,9 +710,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": true, - "Region": "cn-north-1" + "Region": "cn-north-1", + "UseDualStack": false } }, { @@ -723,9 +723,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "us-gov-east-1" + "Region": "us-gov-east-1", + "UseDualStack": false } }, { @@ -736,9 +736,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": true, - "Region": "us-gov-east-1" + "Region": "us-gov-east-1", + "UseDualStack": false } }, { @@ -749,9 +749,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "us-gov-west-1" + "Region": "us-gov-west-1", + "UseDualStack": false } }, { @@ -762,9 +762,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": true, - "Region": "us-gov-west-1" + "Region": "us-gov-west-1", + "UseDualStack": false } }, { @@ -775,9 +775,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": true, - "Region": "us-gov-east-1" + "Region": "us-gov-east-1", + "UseDualStack": true } }, { @@ -788,9 +788,9 @@ } }, "params": { - "UseDualStack": true, "UseFIPS": false, - "Region": "us-gov-east-1" + "Region": "us-gov-east-1", + "UseDualStack": true } }, { @@ -801,9 +801,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "us-iso-east-1" + "Region": "us-iso-east-1", + "UseDualStack": false } }, { @@ -814,9 +814,20 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "us-iso-west-1" + "Region": "us-iso-west-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and 
DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "UseFIPS": true, + "Region": "us-iso-east-1", + "UseDualStack": true } }, { @@ -827,9 +838,20 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": true, - "Region": "us-iso-east-1" + "Region": "us-iso-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "UseFIPS": false, + "Region": "us-iso-east-1", + "UseDualStack": true } }, { @@ -840,9 +862,20 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "us-isob-east-1" + "Region": "us-isob-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "UseFIPS": true, + "Region": "us-isob-east-1", + "UseDualStack": true } }, { @@ -853,9 +886,20 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": true, - "Region": "us-isob-east-1" + "Region": "us-isob-east-1", + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "UseFIPS": false, + "Region": "us-isob-east-1", + "UseDualStack": true } }, { @@ -866,9 +910,9 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, "Region": "us-east-1", + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -880,8 +924,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -891,9 +935,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, "UseFIPS": true, "Region": "us-east-1", + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -903,11 +947,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "UseFIPS": false, "Region": "us-east-1", + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/models/apis/quicksight/2018-04-01/api-2.json b/models/apis/quicksight/2018-04-01/api-2.json index 081cf97fd4b..5619d901f53 100644 --- a/models/apis/quicksight/2018-04-01/api-2.json +++ b/models/apis/quicksight/2018-04-01/api-2.json @@ -278,6 +278,25 @@ {"shape":"ResourceUnavailableException"} ] }, + "CreateRefreshSchedule":{ + "name":"CreateRefreshSchedule", + "http":{ + "method":"POST", + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}/refresh-schedules" + }, + "input":{"shape":"CreateRefreshScheduleRequest"}, + "output":{"shape":"CreateRefreshScheduleResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceExistsException"}, + {"shape":"PreconditionNotMetException"}, + {"shape":"InternalFailureException"} + ] + }, "CreateTemplate":{ "name":"CreateTemplate", "http":{ @@ -441,6 +460,24 @@ {"shape":"InternalFailureException"} ] }, + 
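The QuickSight refresh-schedule and refresh-properties operations and shapes added in this model compose roughly as follows in aws-sdk-go. The field names come from the model itself; the account ID, dataset ID, column name, and schedule values are placeholders, the INCREMENTAL_REFRESH string is an assumed IngestionType value, and the snippet reuses the session and imports from the Lambda examples plus the quicksight package.

```go
qs := quicksight.New(sess)

// Enable incremental refresh with a 24-hour lookback window on a timestamp column.
_, err := qs.PutDataSetRefreshProperties(&quicksight.PutDataSetRefreshPropertiesInput{
	AwsAccountId: aws.String("123456789012"),  // placeholder
	DataSetId:    aws.String("my-dataset-id"), // placeholder
	DataSetRefreshProperties: &quicksight.DataSetRefreshProperties{
		RefreshConfiguration: &quicksight.RefreshConfiguration{
			IncrementalRefresh: &quicksight.IncrementalRefresh{
				LookbackWindow: &quicksight.LookbackWindow{
					ColumnName: aws.String("event_time"), // placeholder column
					Size:       aws.Int64(24),
					SizeUnit:   aws.String("HOUR"),
				},
			},
		},
	},
})
if err != nil {
	log.Fatal(err)
}

// Schedule a daily incremental SPICE refresh for the same dataset.
sched, err := qs.CreateRefreshSchedule(&quicksight.CreateRefreshScheduleInput{
	AwsAccountId: aws.String("123456789012"),
	DataSetId:    aws.String("my-dataset-id"),
	Schedule: &quicksight.RefreshSchedule{
		ScheduleId:  aws.String("daily-incremental"),   // placeholder
		RefreshType: aws.String("INCREMENTAL_REFRESH"), // assumed IngestionType value
		ScheduleFrequency: &quicksight.RefreshFrequency{
			Interval:     aws.String("DAILY"),
			TimeOfTheDay: aws.String("02:00"),
			Timezone:     aws.String("UTC"),
		},
	},
})
if err != nil {
	log.Fatal(err)
}
fmt.Println("schedule ARN:", aws.StringValue(sched.Arn))
```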
"DeleteDataSetRefreshProperties":{ + "name":"DeleteDataSetRefreshProperties", + "http":{ + "method":"DELETE", + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}/refresh-properties" + }, + "input":{"shape":"DeleteDataSetRefreshPropertiesRequest"}, + "output":{"shape":"DeleteDataSetRefreshPropertiesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"ConflictException"}, + {"shape":"InternalFailureException"} + ] + }, "DeleteDataSource":{ "name":"DeleteDataSource", "http":{ @@ -565,6 +602,23 @@ {"shape":"ResourceUnavailableException"} ] }, + "DeleteRefreshSchedule":{ + "name":"DeleteRefreshSchedule", + "http":{ + "method":"DELETE", + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}/refresh-schedules/{ScheduleId}" + }, + "input":{"shape":"DeleteRefreshScheduleRequest"}, + "output":{"shape":"DeleteRefreshScheduleResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"} + ] + }, "DeleteTemplate":{ "name":"DeleteTemplate", "http":{ @@ -857,6 +911,24 @@ {"shape":"InternalFailureException"} ] }, + "DescribeDataSetRefreshProperties":{ + "name":"DescribeDataSetRefreshProperties", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}/refresh-properties" + }, + "input":{"shape":"DescribeDataSetRefreshPropertiesRequest"}, + "output":{"shape":"DescribeDataSetRefreshPropertiesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"PreconditionNotMetException"}, + {"shape":"InternalFailureException"} + ] + }, "DescribeDataSource":{ "name":"DescribeDataSource", "http":{ @@ -1043,6 +1115,23 @@ {"shape":"ResourceUnavailableException"} ] }, + "DescribeRefreshSchedule":{ + "name":"DescribeRefreshSchedule", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}/refresh-schedules/{ScheduleId}" + }, + "input":{"shape":"DescribeRefreshScheduleRequest"}, + "output":{"shape":"DescribeRefreshScheduleResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"} + ] + }, "DescribeTemplate":{ "name":"DescribeTemplate", "http":{ @@ -1490,6 +1579,23 @@ {"shape":"ResourceUnavailableException"} ] }, + "ListRefreshSchedules":{ + "name":"ListRefreshSchedules", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}/refresh-schedules" + }, + "input":{"shape":"ListRefreshSchedulesRequest"}, + "output":{"shape":"ListRefreshSchedulesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"} + ] + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -1647,6 +1753,25 @@ {"shape":"ResourceUnavailableException"} ] }, + 
"PutDataSetRefreshProperties":{ + "name":"PutDataSetRefreshProperties", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}/refresh-properties" + }, + "input":{"shape":"PutDataSetRefreshPropertiesRequest"}, + "output":{"shape":"PutDataSetRefreshPropertiesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"PreconditionNotMetException"}, + {"shape":"ConflictException"}, + {"shape":"InternalFailureException"} + ] + }, "RegisterUser":{ "name":"RegisterUser", "http":{ @@ -2124,6 +2249,24 @@ {"shape":"InternalFailureException"} ] }, + "UpdateRefreshSchedule":{ + "name":"UpdateRefreshSchedule", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}/refresh-schedules" + }, + "input":{"shape":"UpdateRefreshScheduleRequest"}, + "output":{"shape":"UpdateRefreshScheduleResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"PreconditionNotMetException"}, + {"shape":"InternalFailureException"} + ] + }, "UpdateTemplate":{ "name":"UpdateTemplate", "http":{ @@ -4198,6 +4341,39 @@ } } }, + "CreateRefreshScheduleRequest":{ + "type":"structure", + "required":[ + "DataSetId", + "AwsAccountId", + "Schedule" + ], + "members":{ + "DataSetId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"DataSetId" + }, + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "Schedule":{"shape":"RefreshSchedule"} + } + }, + "CreateRefreshScheduleResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"}, + "ScheduleId":{"shape":"String"}, + "Arn":{"shape":"Arn"} + } + }, "CreateTemplateAliasRequest":{ "type":"structure", "required":[ @@ -5008,6 +5184,13 @@ "member":{"shape":"DataSetReference"}, "min":1 }, + "DataSetRefreshProperties":{ + "type":"structure", + "required":["RefreshConfiguration"], + "members":{ + "RefreshConfiguration":{"shape":"RefreshConfiguration"} + } + }, "DataSetSchema":{ "type":"structure", "members":{ @@ -5351,6 +5534,24 @@ "CustomValue":{"shape":"SensitiveTimestamp"} } }, + "DayOfMonth":{ + "type":"string", + "max":17, + "min":1, + "pattern":"^(?:LAST_DAY_OF_MONTH|1[0-9]|2[0-8]|[12]|[3-9])$" + }, + "DayOfWeek":{ + "type":"string", + "enum":[ + "SUNDAY", + "MONDAY", + "TUESDAY", + "WEDNESDAY", + "THURSDAY", + "FRIDAY", + "SATURDAY" + ] + }, "DecimalDefaultValueList":{ "type":"list", "member":{"shape":"SensitiveDoubleObject"}, @@ -5581,6 +5782,35 @@ "RequestId":{"shape":"String"} } }, + "DeleteDataSetRefreshPropertiesRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DataSetId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DataSetId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"DataSetId" + } + } + }, + "DeleteDataSetRefreshPropertiesResponse":{ + "type":"structure", + "members":{ + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, "DeleteDataSetRequest":{ "type":"structure", "required":[ @@ -5853,6 +6083,43 @@ } } }, + "DeleteRefreshScheduleRequest":{ + 
"type":"structure", + "required":[ + "DataSetId", + "AwsAccountId", + "ScheduleId" + ], + "members":{ + "DataSetId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"DataSetId" + }, + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "ScheduleId":{ + "shape":"String", + "location":"uri", + "locationName":"ScheduleId" + } + } + }, + "DeleteRefreshScheduleResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"}, + "ScheduleId":{"shape":"String"}, + "Arn":{"shape":"Arn"} + } + }, "DeleteTemplateAliasRequest":{ "type":"structure", "required":[ @@ -6403,6 +6670,36 @@ } } }, + "DescribeDataSetRefreshPropertiesRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DataSetId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DataSetId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"DataSetId" + } + } + }, + "DescribeDataSetRefreshPropertiesResponse":{ + "type":"structure", + "members":{ + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "DataSetRefreshProperties":{"shape":"DataSetRefreshProperties"} + } + }, "DescribeDataSetRequest":{ "type":"structure", "required":[ @@ -6793,6 +7090,43 @@ } } }, + "DescribeRefreshScheduleRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DataSetId", + "ScheduleId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DataSetId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"DataSetId" + }, + "ScheduleId":{ + "shape":"String", + "location":"uri", + "locationName":"ScheduleId" + } + } + }, + "DescribeRefreshScheduleResponse":{ + "type":"structure", + "members":{ + "RefreshSchedule":{"shape":"RefreshSchedule"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"}, + "Arn":{"shape":"Arn"} + } + }, "DescribeTemplateAliasRequest":{ "type":"structure", "required":[ @@ -8896,6 +9230,13 @@ "error":{"httpStatusCode":403}, "exception":true }, + "IncrementalRefresh":{ + "type":"structure", + "required":["LookbackWindow"], + "members":{ + "LookbackWindow":{"shape":"LookbackWindow"} + } + }, "Ingestion":{ "type":"structure", "required":[ @@ -8969,7 +9310,8 @@ "REFRESH_SUPPRESSED_BY_EDIT", "PERMISSION_NOT_FOUND", "ELASTICSEARCH_CURSOR_NOT_ENABLED", - "CURSOR_NOT_ENABLED" + "CURSOR_NOT_ENABLED", + "DUPLICATE_COLUMN_NAMES_FOUND" ] }, "IngestionId":{ @@ -10051,6 +10393,36 @@ } } }, + "ListRefreshSchedulesRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DataSetId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DataSetId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"DataSetId" + } + } + }, + "ListRefreshSchedulesResponse":{ + "type":"structure", + "members":{ + "RefreshSchedules":{"shape":"RefreshSchedules"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"} + } + }, "ListTagsForResourceRequest":{ "type":"structure", "required":["ResourceArn"], @@ -10478,6 +10850,27 @@ "max":1800, "min":-1800 }, + "LookbackWindow":{ + "type":"structure", + "required":[ + "ColumnName", + "Size", + "SizeUnit" + ], + "members":{ + "ColumnName":{"shape":"String"}, + 
"Size":{"shape":"PositiveLong"}, + "SizeUnit":{"shape":"LookbackWindowSizeUnit"} + } + }, + "LookbackWindowSizeUnit":{ + "type":"string", + "enum":[ + "HOUR", + "DAY", + "WEEK" + ] + }, "ManifestFileLocation":{ "type":"structure", "required":[ @@ -11555,6 +11948,10 @@ "type":"integer", "min":1 }, + "PositiveLong":{ + "type":"long", + "min":1 + }, "PostgreSqlParameters":{ "type":"structure", "required":[ @@ -11657,6 +12054,37 @@ "max":2000, "min":1 }, + "PutDataSetRefreshPropertiesRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DataSetId", + "DataSetRefreshProperties" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DataSetId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"DataSetId" + }, + "DataSetRefreshProperties":{"shape":"DataSetRefreshProperties"} + } + }, + "PutDataSetRefreshPropertiesResponse":{ + "type":"structure", + "members":{ + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, "Query":{ "type":"string", "max":256, @@ -11912,6 +12340,53 @@ "AFTER_CUSTOM_LABEL" ] }, + "RefreshConfiguration":{ + "type":"structure", + "required":["IncrementalRefresh"], + "members":{ + "IncrementalRefresh":{"shape":"IncrementalRefresh"} + } + }, + "RefreshFrequency":{ + "type":"structure", + "required":["Interval"], + "members":{ + "Interval":{"shape":"RefreshInterval"}, + "RefreshOnDay":{"shape":"ScheduleRefreshOnEntity"}, + "Timezone":{"shape":"String"}, + "TimeOfTheDay":{"shape":"String"} + } + }, + "RefreshInterval":{ + "type":"string", + "enum":[ + "MINUTE15", + "MINUTE30", + "HOURLY", + "DAILY", + "WEEKLY", + "MONTHLY" + ] + }, + "RefreshSchedule":{ + "type":"structure", + "required":[ + "ScheduleId", + "ScheduleFrequency", + "RefreshType" + ], + "members":{ + "ScheduleId":{"shape":"String"}, + "ScheduleFrequency":{"shape":"RefreshFrequency"}, + "StartAfterDateTime":{"shape":"Timestamp"}, + "RefreshType":{"shape":"IngestionType"}, + "Arn":{"shape":"Arn"} + } + }, + "RefreshSchedules":{ + "type":"list", + "member":{"shape":"RefreshSchedule"} + }, "RegisterUserRequest":{ "type":"structure", "required":[ @@ -12293,7 +12768,8 @@ "required":["TagRules"], "members":{ "Status":{"shape":"Status"}, - "TagRules":{"shape":"RowLevelPermissionTagRuleList"} + "TagRules":{"shape":"RowLevelPermissionTagRuleList"}, + "TagRuleConfigurations":{"shape":"RowLevelPermissionTagRuleConfigurationList"} } }, "RowLevelPermissionTagDelimiter":{ @@ -12313,6 +12789,18 @@ "MatchAllValue":{"shape":"SessionTagValue"} } }, + "RowLevelPermissionTagRuleConfiguration":{ + "type":"list", + "member":{"shape":"SessionTagKey"}, + "max":50, + "min":1 + }, + "RowLevelPermissionTagRuleConfigurationList":{ + "type":"list", + "member":{"shape":"RowLevelPermissionTagRuleConfiguration"}, + "max":50, + "min":1 + }, "RowLevelPermissionTagRuleList":{ "type":"list", "member":{"shape":"RowLevelPermissionTagRule"}, @@ -12452,6 +12940,13 @@ "ColumnHierarchies":{"shape":"ColumnHierarchyList"} } }, + "ScheduleRefreshOnEntity":{ + "type":"structure", + "members":{ + "DayOfWeek":{"shape":"DayOfWeek"}, + "DayOfMonth":{"shape":"DayOfMonth"} + } + }, "ScrollBarOptions":{ "type":"structure", "members":{ @@ -15042,6 +15537,39 @@ } } }, + "UpdateRefreshScheduleRequest":{ + "type":"structure", + "required":[ + "DataSetId", + "AwsAccountId", + "Schedule" + ], + "members":{ + "DataSetId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"DataSetId" + }, + "AwsAccountId":{ + 
"shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "Schedule":{"shape":"RefreshSchedule"} + } + }, + "UpdateRefreshScheduleResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"}, + "ScheduleId":{"shape":"String"}, + "Arn":{"shape":"Arn"} + } + }, "UpdateResourcePermissionList":{ "type":"list", "member":{"shape":"ResourcePermission"}, diff --git a/models/apis/quicksight/2018-04-01/docs-2.json b/models/apis/quicksight/2018-04-01/docs-2.json index 99d7d604d73..181286ff0d4 100644 --- a/models/apis/quicksight/2018-04-01/docs-2.json +++ b/models/apis/quicksight/2018-04-01/docs-2.json @@ -16,6 +16,7 @@ "CreateIAMPolicyAssignment": "Creates an assignment with one specified IAM policy, identified by its Amazon Resource Name (ARN). This policy assignment is attached to the specified groups or users of Amazon QuickSight. Assignment names are unique per Amazon Web Services account. To avoid overwriting rules in other namespaces, use assignment names that are unique.
", "CreateIngestion": "Creates and starts a new SPICE ingestion for a dataset. You can manually refresh datasets in an Enterprise edition account 32 times in a 24-hour period. You can manually refresh datasets in a Standard edition account 8 times in a 24-hour period. Each 24-hour period is measured starting 24 hours before the current date and time.
Any ingestions operating on tagged datasets inherit the same tags automatically for use in access control. For an example, see How do I create an IAM policy to control access to Amazon EC2 resources using tags? in the Amazon Web Services Knowledge Center. Tags are visible on the tagged dataset, but not on the ingestion resource.
", "CreateNamespace": "(Enterprise edition only) Creates a new namespace for you to use with Amazon QuickSight.
A namespace allows you to isolate the Amazon QuickSight users and groups that are registered for that namespace. Users that access the namespace can share assets only with other users or groups in the same namespace. They can't see users and groups in other namespaces. You can create a namespace after your Amazon Web Services account is subscribed to Amazon QuickSight. The namespace must be unique within the Amazon Web Services account. By default, there is a limit of 100 namespaces per Amazon Web Services account. To increase your limit, create a ticket with Amazon Web Services Support.
", + "CreateRefreshSchedule": "Creates a refresh schedule for a dataset. You can create up to 5 different schedules for a single dataset.
", "CreateTemplate": "Creates a template either from a TemplateDefinition
or from an existing Amazon QuickSight analysis or template. You can use the resulting template to create additional dashboards, templates, or analyses.
A template is an entity in Amazon QuickSight that encapsulates the metadata required to create an analysis and that you can use to create a dashboard. A template adds a layer of abstraction by using placeholders to replace the dataset associated with the analysis. You can use templates to create dashboards by replacing dataset placeholders with datasets that follow the same schema that was used to create the source analysis and template.
", "CreateTemplateAlias": "Creates a template alias for a template.
", "CreateTheme": "Creates a theme.
A theme is a set of configuration options for color and layout. Themes apply to analyses and dashboards. For more information, see Using Themes in Amazon QuickSight in the Amazon QuickSight User Guide.
", @@ -25,6 +26,7 @@ "DeleteAnalysis": "Deletes an analysis from Amazon QuickSight. You can optionally include a recovery window during which you can restore the analysis. If you don't specify a recovery window value, the operation defaults to 30 days. Amazon QuickSight attaches a DeletionTime
stamp to the response that specifies the end of the recovery window. At the end of the recovery window, Amazon QuickSight deletes the analysis permanently.
At any time before the recovery window ends, you can use the RestoreAnalysis
API operation to remove the DeletionTime
stamp and cancel the deletion of the analysis. The analysis remains visible in the API until it's deleted, so you can describe it but you can't make a template from it.
An analysis that's scheduled for deletion isn't accessible in the Amazon QuickSight console. To access it in the console, restore it. Deleting an analysis doesn't delete the dashboards that you publish from it.
", "DeleteDashboard": "Deletes a dashboard.
", "DeleteDataSet": "Deletes a dataset.
", + "DeleteDataSetRefreshProperties": "Deletes the dataset refresh properties of the dataset.
", "DeleteDataSource": "Deletes the data source permanently. This operation breaks all the datasets that reference the deleted data source.
", "DeleteFolder": "Deletes an empty folder.
", "DeleteFolderMembership": "Removes an asset, such as a dashboard, analysis, or dataset, from a folder.
", @@ -32,6 +34,7 @@ "DeleteGroupMembership": "Removes a user from a group so that the user is no longer a member of the group.
", "DeleteIAMPolicyAssignment": "Deletes an existing IAM policy assignment.
", "DeleteNamespace": "Deletes a namespace and the users and groups that are associated with the namespace. This is an asynchronous process. Assets including dashboards, analyses, datasets and data sources are not deleted. To delete these assets, you use the API operations for the relevant asset.
", + "DeleteRefreshSchedule": "Deletes a refresh schedule from a dataset.
", "DeleteTemplate": "Deletes a template.
", "DeleteTemplateAlias": "Deletes the item that the specified template alias points to. If you provide a specific alias, you delete the version of the template that the alias points to.
", "DeleteTheme": "Deletes a theme.
", @@ -49,6 +52,7 @@ "DescribeDashboardPermissions": "Describes read and write permissions for a dashboard.
", "DescribeDataSet": "Describes a dataset. This operation doesn't support datasets that include uploaded files as a source.
", "DescribeDataSetPermissions": "Describes the permissions on a dataset.
The permissions resource is arn:aws:quicksight:region:aws-account-id:dataset/data-set-id
.
", + "DescribeDataSetRefreshProperties": "Describes the refresh properties of a dataset.
", "DescribeDataSource": "Describes a data source.
", "DescribeDataSourcePermissions": "Describes the resource permissions for a data source.
", "DescribeFolder": "Describes a folder.
", @@ -60,6 +64,7 @@ "DescribeIngestion": "Describes a SPICE ingestion.
", "DescribeIpRestriction": "Provides a summary and status of IP rules.
", "DescribeNamespace": "Describes the current namespace.
", + "DescribeRefreshSchedule": "Provides a summary of a refresh schedule.
", "DescribeTemplate": "Describes a template's metadata.
", "DescribeTemplateAlias": "Describes the template alias for a template.
", "DescribeTemplateDefinition": "Provides a detailed description of the definition of a template.
If you do not need to know details about the content of a template, for instance if you are trying to check the status of a recently created or updated template, use the DescribeTemplate
instead.
Lists all the IAM policy assignments, including the Amazon Resource Names (ARNs) for the IAM policies assigned to the specified user and group or groups that the user belongs to.
", "ListIngestions": "Lists the history of SPICE ingestions for a dataset.
", "ListNamespaces": "Lists the namespaces for the specified Amazon Web Services account. This operation doesn't list deleted namespaces.
", + "ListRefreshSchedules": "Lists the refresh schedules of a dataset. Each dataset can have up to 5 schedules.
", "ListTagsForResource": "Lists the tags assigned to a resource.
", "ListTemplateAliases": "Lists all the aliases of a template.
", "ListTemplateVersions": "Lists all the versions of the templates in the current Amazon QuickSight account.
", @@ -94,6 +100,7 @@ "ListThemes": "Lists all the themes in the current Amazon Web Services account.
", "ListUserGroups": "Lists the Amazon QuickSight groups that an Amazon QuickSight user is a member of.
", "ListUsers": "Returns a list of all of the Amazon QuickSight users belonging to this account.
", + "PutDataSetRefreshProperties": "Creates or updates the dataset refresh properties for the dataset.
", "RegisterUser": "Creates an Amazon QuickSight user whose identity is associated with the Identity and Access Management (IAM) identity or role specified in the request. When you register a new user from the Amazon QuickSight API, Amazon QuickSight generates a registration URL. The user accesses this registration URL to create their account. Amazon QuickSight doesn't send a registration email to users who are registered from the Amazon QuickSight API. If you want new users to receive a registration email, then add those users in the Amazon QuickSight console. For more information on registering a new user in the Amazon QuickSight console, see Inviting users to access Amazon QuickSight.
", "RestoreAnalysis": "Restores an analysis.
", "SearchAnalyses": "Searches for analyses that belong to the user specified in the filter.
This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.
Updates an existing IAM policy assignment. This operation updates only the optional parameter or parameters that are specified in the request. This overwrites all of the users included in Identities
.
Updates the content and status of IP rules. To use this operation, you need to provide the entire map of rules. You can use the DescribeIpRestriction
operation to get the current rule map.
Use the UpdatePublicSharingSettings
operation to turn on or turn off the public sharing settings of an Amazon QuickSight dashboard.
To use this operation, turn on session capacity pricing for your Amazon QuickSight account.
Before you can turn on public sharing on your account, make sure to give public sharing permissions to an administrative user in the Identity and Access Management (IAM) console. For more information on using IAM with Amazon QuickSight, see Using Amazon QuickSight with IAM in the Amazon QuickSight User Guide.
", + "UpdateRefreshSchedule": "Updates a refresh schedule for a dataset.
", "UpdateTemplate": "Updates a template from an existing Amazon QuickSight analysis or another template.
", "UpdateTemplateAlias": "Updates the template alias of a template.
", "UpdateTemplatePermissions": "Updates the resource permissions for a template.
", @@ -438,6 +446,7 @@ "CreateIAMPolicyAssignmentResponse$PolicyArn": "The ARN for the IAM policy that is applied to the Amazon QuickSight users and groups specified in this assignment.
", "CreateIngestionResponse$Arn": "The Amazon Resource Name (ARN) for the data ingestion.
", "CreateNamespaceResponse$Arn": "The ARN of the Amazon QuickSight namespace you created.
", + "CreateRefreshScheduleResponse$Arn": "The Amazon Resource Name (ARN) for the refresh schedule.
", "CreateTemplateResponse$Arn": "The ARN for the template.
", "CreateTemplateResponse$VersionArn": "The ARN for the template, including the version information of the first version.
", "CreateThemeResponse$Arn": "The Amazon Resource Name (ARN) for the theme.
", @@ -463,6 +472,7 @@ "DeleteDataSetResponse$Arn": "The Amazon Resource Name (ARN) of the dataset.
", "DeleteDataSourceResponse$Arn": "The Amazon Resource Name (ARN) of the data source that you deleted.
", "DeleteFolderResponse$Arn": "The Amazon Resource Name of the deleted folder.
", + "DeleteRefreshScheduleResponse$Arn": "The Amazon Resource Name (ARN) for the refresh schedule.
", "DeleteTemplateAliasResponse$Arn": "The Amazon Resource Name (ARN) of the template you want to delete.
", "DeleteTemplateResponse$Arn": "The Amazon Resource Name (ARN) of the resource.
", "DeleteThemeAliasResponse$Arn": "The Amazon Resource Name (ARN) of the theme resource using the deleted alias.
", @@ -476,6 +486,7 @@ "DescribeDataSourcePermissionsResponse$DataSourceArn": "The Amazon Resource Name (ARN) of the data source.
", "DescribeFolderPermissionsResponse$Arn": "The Amazon Resource Name (ARN) for the folder.
", "DescribeFolderResolvedPermissionsResponse$Arn": "The Amazon Resource Name (ARN) of the folder.
", + "DescribeRefreshScheduleResponse$Arn": "The Amazon Resource Name (ARN) for the refresh schedule.
", "DescribeTemplateDefinitionResponse$ThemeArn": "The ARN of the theme of the template.
", "DescribeTemplatePermissionsResponse$TemplateArn": "The Amazon Resource Name (ARN) of the template.
", "DescribeThemePermissionsResponse$ThemeArn": "The Amazon Resource Name (ARN) of the theme.
", @@ -494,6 +505,7 @@ "MemberIdArnPair$MemberArn": "The Amazon Resource Name (ARN) of the member.
", "NamespaceInfoV2$Arn": "The namespace ARN.
", "Path$member": null, + "RefreshSchedule$Arn": "The Amazon Resource Name (ARN) for the refresh schedule.
", "RelationalTable$DataSourceArn": "The Amazon Resource Name (ARN) for the data source.
", "RestoreAnalysisResponse$Arn": "The Amazon Resource Name (ARN) of the analysis that you're restoring.
", "RowLevelPermissionDataSet$Arn": "The Amazon Resource Name (ARN) of the dataset that contains permissions for RLS.
", @@ -531,6 +543,7 @@ "UpdateFolderResponse$Arn": "The Amazon Resource Name (ARN) of the folder.
", "UpdateIAMPolicyAssignmentRequest$PolicyArn": "The ARN for the IAM policy to apply to the Amazon QuickSight users and groups specified in this assignment.
", "UpdateIAMPolicyAssignmentResponse$PolicyArn": "The ARN for the IAM policy applied to the Amazon QuickSight users and groups specified in this assignment.
", + "UpdateRefreshScheduleResponse$Arn": "The Amazon Resource Name (ARN) for the refresh schedule.
", "UpdateTemplatePermissionsResponse$TemplateArn": "The Amazon Resource Name (ARN) of the template.
", "UpdateTemplateResponse$Arn": "The Amazon Resource Name (ARN) for the template.
", "UpdateTemplateResponse$VersionArn": "The ARN for the template, including the version information of the first version.
", @@ -601,6 +614,7 @@ "CreateIAMPolicyAssignmentRequest$AwsAccountId": "The ID of the Amazon Web Services account where you want to assign an IAM policy to Amazon QuickSight users or groups.
", "CreateIngestionRequest$AwsAccountId": "The Amazon Web Services account ID.
", "CreateNamespaceRequest$AwsAccountId": "The ID for the Amazon Web Services account that you want to create the Amazon QuickSight namespace in.
", + "CreateRefreshScheduleRequest$AwsAccountId": "The Amazon Web Services account ID.
", "CreateTemplateAliasRequest$AwsAccountId": "The ID of the Amazon Web Services account that contains the template that you creating an alias for.
", "CreateTemplateRequest$AwsAccountId": "The ID for the Amazon Web Services account that the group is in. You use the ID for the Amazon Web Services account that contains your Amazon QuickSight account.
", "CreateThemeAliasRequest$AwsAccountId": "The ID of the Amazon Web Services account that contains the theme for the new theme alias.
", @@ -609,6 +623,7 @@ "DeleteAccountSubscriptionRequest$AwsAccountId": "The Amazon Web Services account ID of the account that you want to delete.
", "DeleteAnalysisRequest$AwsAccountId": "The ID of the Amazon Web Services account where you want to delete an analysis.
", "DeleteDashboardRequest$AwsAccountId": "The ID of the Amazon Web Services account that contains the dashboard that you're deleting.
", + "DeleteDataSetRefreshPropertiesRequest$AwsAccountId": "The Amazon Web Services account ID.
", "DeleteDataSetRequest$AwsAccountId": "The Amazon Web Services account ID.
", "DeleteDataSourceRequest$AwsAccountId": "The Amazon Web Services account ID.
", "DeleteFolderMembershipRequest$AwsAccountId": "The ID for the Amazon Web Services account that contains the folder.
", @@ -617,6 +632,7 @@ "DeleteGroupRequest$AwsAccountId": "The ID for the Amazon Web Services account that the group is in. Currently, you use the ID for the Amazon Web Services account that contains your Amazon QuickSight account.
", "DeleteIAMPolicyAssignmentRequest$AwsAccountId": "The Amazon Web Services account ID where you want to delete the IAM policy assignment.
", "DeleteNamespaceRequest$AwsAccountId": "The ID for the Amazon Web Services account that you want to delete the Amazon QuickSight namespace from.
", + "DeleteRefreshScheduleRequest$AwsAccountId": "The Amazon Web Services account ID.
", "DeleteTemplateAliasRequest$AwsAccountId": "The ID of the Amazon Web Services account that contains the item to delete.
", "DeleteTemplateRequest$AwsAccountId": "The ID of the Amazon Web Services account that contains the template that you're deleting.
", "DeleteThemeAliasRequest$AwsAccountId": "The ID of the Amazon Web Services account that contains the theme alias to delete.
", @@ -634,6 +650,7 @@ "DescribeDashboardPermissionsRequest$AwsAccountId": "The ID of the Amazon Web Services account that contains the dashboard that you're describing permissions for.
", "DescribeDashboardRequest$AwsAccountId": "The ID of the Amazon Web Services account that contains the dashboard that you're describing.
", "DescribeDataSetPermissionsRequest$AwsAccountId": "The Amazon Web Services account ID.
", + "DescribeDataSetRefreshPropertiesRequest$AwsAccountId": "The Amazon Web Services account ID.
", "DescribeDataSetRequest$AwsAccountId": "The Amazon Web Services account ID.
", "DescribeDataSourcePermissionsRequest$AwsAccountId": "The Amazon Web Services account ID.
", "DescribeDataSourceRequest$AwsAccountId": "The Amazon Web Services account ID.
", @@ -647,6 +664,7 @@ "DescribeIpRestrictionRequest$AwsAccountId": "The ID of the Amazon Web Services account that contains the IP rules.
", "DescribeIpRestrictionResponse$AwsAccountId": "The ID of the Amazon Web Services account that contains the IP rules.
", "DescribeNamespaceRequest$AwsAccountId": "The ID for the Amazon Web Services account that contains the Amazon QuickSight namespace that you want to describe.
", + "DescribeRefreshScheduleRequest$AwsAccountId": "The Amazon Web Services account ID.
", "DescribeTemplateAliasRequest$AwsAccountId": "The ID of the Amazon Web Services account that contains the template alias that you're describing.
", "DescribeTemplateDefinitionRequest$AwsAccountId": "The ID of the Amazon Web Services account that contains the template. You must be using the Amazon Web Services account that the template is in.
", "DescribeTemplatePermissionsRequest$AwsAccountId": "The ID of the Amazon Web Services account that contains the template that you're describing.
", @@ -672,6 +690,7 @@ "ListIAMPolicyAssignmentsRequest$AwsAccountId": "The ID of the Amazon Web Services account that contains these IAM policy assignments.
", "ListIngestionsRequest$AwsAccountId": "The Amazon Web Services account ID.
", "ListNamespacesRequest$AwsAccountId": "The ID for the Amazon Web Services account that contains the Amazon QuickSight namespaces that you want to list.
", + "ListRefreshSchedulesRequest$AwsAccountId": "The Amazon Web Services account ID.
", "ListTemplateAliasesRequest$AwsAccountId": "The ID of the Amazon Web Services account that contains the template aliases that you're listing.
", "ListTemplateVersionsRequest$AwsAccountId": "The ID of the Amazon Web Services account that contains the templates that you're listing.
", "ListTemplatesRequest$AwsAccountId": "The ID of the Amazon Web Services account that contains the templates that you're listing.
", @@ -680,6 +699,7 @@ "ListThemesRequest$AwsAccountId": "The ID of the Amazon Web Services account that contains the themes that you're listing.
", "ListUserGroupsRequest$AwsAccountId": "The Amazon Web Services account ID that the user is in. Currently, you use the ID for the Amazon Web Services account that contains your Amazon QuickSight account.
", "ListUsersRequest$AwsAccountId": "The ID for the Amazon Web Services account that the user is in. Currently, you use the ID for the Amazon Web Services account that contains your Amazon QuickSight account.
", + "PutDataSetRefreshPropertiesRequest$AwsAccountId": "The Amazon Web Services account ID.
", "RegisterUserRequest$AwsAccountId": "The ID for the Amazon Web Services account that the user is in. Currently, you use the ID for the Amazon Web Services account that contains your Amazon QuickSight account.
", "RestoreAnalysisRequest$AwsAccountId": "The ID of the Amazon Web Services account that contains the analysis.
", "SearchAnalysesRequest$AwsAccountId": "The ID of the Amazon Web Services account that contains the analyses that you're searching for.
", @@ -707,6 +727,7 @@ "UpdateIpRestrictionRequest$AwsAccountId": "The ID of the Amazon Web Services account that contains the IP rules.
", "UpdateIpRestrictionResponse$AwsAccountId": "The ID of the Amazon Web Services account that contains the IP rules.
", "UpdatePublicSharingSettingsRequest$AwsAccountId": "The Amazon Web Services account ID associated with your Amazon QuickSight subscription.
", + "UpdateRefreshScheduleRequest$AwsAccountId": "The Amazon Web Services account ID.
", "UpdateTemplateAliasRequest$AwsAccountId": "The ID of the Amazon Web Services account that contains the template alias that you're updating.
", "UpdateTemplatePermissionsRequest$AwsAccountId": "The ID of the Amazon Web Services account that contains the template.
", "UpdateTemplateRequest$AwsAccountId": "The ID of the Amazon Web Services account that contains the template that you're updating.
", @@ -1798,6 +1819,16 @@ "refs": { } }, + "CreateRefreshScheduleRequest": { + "base": null, + "refs": { + } + }, + "CreateRefreshScheduleResponse": { + "base": null, + "refs": { + } + }, "CreateTemplateAliasRequest": { "base": null, "refs": { @@ -2321,6 +2352,13 @@ "TemplateSourceAnalysis$DataSetReferences": "A structure containing information about the dataset references used as placeholders in the template.
" } }, + "DataSetRefreshProperties": { + "base": "The refresh properties of a dataset.
", + "refs": { + "DescribeDataSetRefreshPropertiesResponse$DataSetRefreshProperties": "The dataset refresh properties.
", + "PutDataSetRefreshPropertiesRequest$DataSetRefreshProperties": "The dataset refresh properties.
" + } + }, "DataSetSchema": { "base": "Dataset schema.
", "refs": { @@ -2559,6 +2597,18 @@ "DateTimeParameterDeclaration$ValueWhenUnset": "The configuration that defines the default value of a DateTime
parameter when a value has not been set.
" } }, + "DayOfMonth": { + "base": null, + "refs": { + "ScheduleRefreshOnEntity$DayOfMonth": "The day of the month that you want to schedule a refresh on.
" + } + }, + "DayOfWeek": { + "base": null, + "refs": { + "ScheduleRefreshOnEntity$DayOfWeek": "The day of the week that you want to schedule a refresh on.
" + } + }, "DecimalDefaultValueList": { "base": null, "refs": { @@ -2686,6 +2736,16 @@ "refs": { } }, + "DeleteDataSetRefreshPropertiesRequest": { + "base": null, + "refs": { + } + }, + "DeleteDataSetRefreshPropertiesResponse": { + "base": null, + "refs": { + } + }, "DeleteDataSetRequest": { "base": null, "refs": { @@ -2766,6 +2826,16 @@ "refs": { } }, + "DeleteRefreshScheduleRequest": { + "base": null, + "refs": { + } + }, + "DeleteRefreshScheduleResponse": { + "base": null, + "refs": { + } + }, "DeleteTemplateAliasRequest": { "base": null, "refs": { @@ -2932,6 +3002,16 @@ "refs": { } }, + "DescribeDataSetRefreshPropertiesRequest": { + "base": null, + "refs": { + } + }, + "DescribeDataSetRefreshPropertiesResponse": { + "base": null, + "refs": { + } + }, "DescribeDataSetRequest": { "base": null, "refs": { @@ -3052,6 +3132,16 @@ "refs": { } }, + "DescribeRefreshScheduleRequest": { + "base": null, + "refs": { + } + }, + "DescribeRefreshScheduleResponse": { + "base": null, + "refs": { + } + }, "DescribeTemplateAliasRequest": { "base": null, "refs": { @@ -4665,6 +4755,12 @@ "refs": { } }, + "IncrementalRefresh": { + "base": "The incremental refresh configuration for a dataset.
", + "refs": { + "RefreshConfiguration$IncrementalRefresh": "The incremental refresh for the dataset.
" + } + }, "Ingestion": { "base": "Information about the SPICE ingestion for a dataset.
", "refs": { @@ -4717,7 +4813,8 @@ "IngestionType": { "base": "This defines the type of ingestion user wants to trigger. This is part of create ingestion request.", "refs": { - "CreateIngestionRequest$IngestionType": "The type of ingestion that you want to create.
" + "CreateIngestionRequest$IngestionType": "The type of ingestion that you want to create.
", + "RefreshSchedule$RefreshType": "The type of refresh that a datset undergoes. Valid values are as follows:
FULL_REFRESH
: A complete refresh of a dataset.
INCREMENTAL_REFRESH
: A partial refresh of some rows of a dataset, based on the time window specified.
For more information on full and incremental refreshes, see Refreshing SPICE data in the Amazon QuickSight User Guide.
" } }, "Ingestions": { @@ -5282,6 +5379,16 @@ "refs": { } }, + "ListRefreshSchedulesRequest": { + "base": null, + "refs": { + } + }, + "ListRefreshSchedulesResponse": { + "base": null, + "refs": { + } + }, "ListTagsForResourceRequest": { "base": null, "refs": { @@ -5456,6 +5563,18 @@ "GeospatialCoordinateBounds$East": "The longitude of the east bound of the geospatial coordinate bounds.
" } }, + "LookbackWindow": { + "base": "The lookback window setup of an incremental refresh configuration.
", + "refs": { + "IncrementalRefresh$LookbackWindow": "The lookback window setup for an incremental refresh configuration.
" + } + }, + "LookbackWindowSizeUnit": { + "base": null, + "refs": { + "LookbackWindow$SizeUnit": "The size unit that is used for the lookback window column. Valid values for this structure are HOUR
, DAY
, and WEEK
.
" + } + }, "ManifestFileLocation": { "base": "Amazon S3 manifest file location.
", "refs": { @@ -6409,6 +6528,12 @@ "UploadSettings$StartFromRow": "A row number to start reading data from.
" } }, + "PositiveLong": { + "base": null, + "refs": { + "LookbackWindow$Size": "The lookback window column size.
" + } + }, "PostgreSqlParameters": { "base": "The parameters for PostgreSQL.
", "refs": { @@ -6490,6 +6615,16 @@ "ProjectOperation$ProjectedColumns": "Projected columns.
" } }, + "PutDataSetRefreshPropertiesRequest": { + "base": null, + "refs": { + } + }, + "PutDataSetRefreshPropertiesResponse": { + "base": null, + "refs": { + } + }, "Query": { "base": null, "refs": { @@ -6684,6 +6819,39 @@ "ReferenceLineValueLabelConfiguration$RelativePosition": "The relative position of the value label. Choose one of the following options:
BEFORE_CUSTOM_LABEL
AFTER_CUSTOM_LABEL
" } }, + "RefreshConfiguration": { + "base": "The refresh configuration of a dataset.
", + "refs": { + "DataSetRefreshProperties$RefreshConfiguration": "The refresh configuration for a dataset.
" + } + }, + "RefreshFrequency": { + "base": "Specifies the interval between each scheduled refresh of a dataset.
", + "refs": { + "RefreshSchedule$ScheduleFrequency": "The frequency for the refresh schedule.
" + } + }, + "RefreshInterval": { + "base": null, + "refs": { + "RefreshFrequency$Interval": "The interval between scheduled refreshes. Valid values are as follows:
MINUTE15
: The dataset refreshes every 15 minutes. This value is only supported for incremental refreshes. This interval can only be used for one schedule per dataset.
MINUTE30
: The dataset refreshes every 30 minutes. This value is only supported for incremental refreshes. This interval can only be used for one schedule per dataset.
HOURLY
: The dataset refreshes every hour. This interval can only be used for one schedule per dataset.
DAILY
: The dataset refreshes every day.
WEEKLY
: The dataset refreshes every week.
MONTHLY
: The dataset refreshes every month.
" + } + }, + "RefreshSchedule": { + "base": "The refresh schedule of a dataset.
", + "refs": { + "CreateRefreshScheduleRequest$Schedule": "The refresh schedule.
", + "DescribeRefreshScheduleResponse$RefreshSchedule": "The refresh schedule.
", + "RefreshSchedules$member": "A list of RefreshSchedule
objects.
", + "UpdateRefreshScheduleRequest$Schedule": "The refresh schedule.
" + } + }, + "RefreshSchedules": { + "base": null, + "refs": { + "ListRefreshSchedulesResponse$RefreshSchedules": "The list of refresh schedules for the dataset.
" + } + }, "RegisterUserRequest": { "base": null, "refs": { @@ -6809,20 +6977,27 @@ "CreateDataSetResponse$IngestionId": "The ID of the ingestion, which is triggered as a result of dataset creation if the import mode is SPICE.
", "CreateDataSourceRequest$DataSourceId": "An ID for the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.
", "CreateDataSourceResponse$DataSourceId": "The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.
", + "CreateRefreshScheduleRequest$DataSetId": "The ID of the dataset.
", "DataSet$DataSetId": "The ID of the dataset.
", "DataSetSummary$DataSetId": "The ID of the dataset.
", "DataSource$DataSourceId": "The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.
", "DataSourceSummary$DataSourceId": "The unique ID of the data source.
", + "DeleteDataSetRefreshPropertiesRequest$DataSetId": "The ID of the dataset.
", "DeleteDataSetRequest$DataSetId": "The ID for the dataset that you want to create. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.
", "DeleteDataSetResponse$DataSetId": "The ID for the dataset that you want to create. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.
", "DeleteDataSourceRequest$DataSourceId": "The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.
", "DeleteDataSourceResponse$DataSourceId": "The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.
", + "DeleteRefreshScheduleRequest$DataSetId": "The ID of the dataset.
", "DescribeDataSetPermissionsRequest$DataSetId": "The ID for the dataset that you want to create. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.
", "DescribeDataSetPermissionsResponse$DataSetId": "The ID for the dataset that you want to create. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.
", + "DescribeDataSetRefreshPropertiesRequest$DataSetId": "The ID of the dataset.
", "DescribeDataSetRequest$DataSetId": "The ID for the dataset that you want to create. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.
", "DescribeDataSourcePermissionsRequest$DataSourceId": "The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.
", "DescribeDataSourcePermissionsResponse$DataSourceId": "The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.
", "DescribeDataSourceRequest$DataSourceId": "The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.
", + "DescribeRefreshScheduleRequest$DataSetId": "The ID of the dataset.
", + "ListRefreshSchedulesRequest$DataSetId": "The ID of the dataset.
", + "PutDataSetRefreshPropertiesRequest$DataSetId": "The ID of the dataset.
", "UpdateDataSetPermissionsRequest$DataSetId": "The ID for the dataset whose permissions you want to update. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.
", "UpdateDataSetPermissionsResponse$DataSetId": "The ID for the dataset whose permissions you want to update. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.
", "UpdateDataSetRequest$DataSetId": "The ID for the dataset that you want to update. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.
", @@ -6831,7 +7006,8 @@ "UpdateDataSourcePermissionsRequest$DataSourceId": "The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.
", "UpdateDataSourcePermissionsResponse$DataSourceId": "The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.
", "UpdateDataSourceRequest$DataSourceId": "The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.
", - "UpdateDataSourceResponse$DataSourceId": "The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.
" + "UpdateDataSourceResponse$DataSourceId": "The ID of the data source. This ID is unique per Amazon Web Services Region for each Amazon Web Services account.
", + "UpdateRefreshScheduleRequest$DataSetId": "The ID of the dataset.
" } }, "ResourceName": { @@ -7051,6 +7227,18 @@ "RowLevelPermissionTagRuleList$member": null } }, + "RowLevelPermissionTagRuleConfiguration": { + "base": null, + "refs": { + "RowLevelPermissionTagRuleConfigurationList$member": null + } + }, + "RowLevelPermissionTagRuleConfigurationList": { + "base": null, + "refs": { + "RowLevelPermissionTagConfiguration$TagRuleConfigurations": "A list of tag configuration rules to apply to a dataset. All tag configurations have the OR condition. Tags within each tile will be joined (AND). At least one rule in this structure must have all tag values assigned to it to apply Row-level security (RLS) to the dataset.
" + } + }, "RowLevelPermissionTagRuleList": { "base": null, "refs": { @@ -7153,6 +7341,12 @@ "Visual$ScatterPlotVisual": "A scatter plot.
For more information, see Using scatter plots in the Amazon QuickSight User Guide.
" } }, + "ScheduleRefreshOnEntity": { + "base": "The refresh on entity for weekly or monthly schedules.
", + "refs": { + "RefreshFrequency$RefreshOnDay": "The day of the week that you want to schedule the refresh on. This value is required for weekly and monthly refresh intervals.
" + } + }, "ScrollBarOptions": { "base": "The visual display options for a data zoom scroll bar.
", "refs": { @@ -7434,6 +7628,7 @@ "base": null, "refs": { "RowLevelPermissionTagRule$TagKey": "The unique key for a tag.
", + "RowLevelPermissionTagRuleConfiguration$member": null, "SessionTag$Key": "The key for the tag.
" } }, @@ -7961,6 +8156,7 @@ "CreateIAMPolicyAssignmentResponse$Status": "The HTTP status of the request.
", "CreateIngestionResponse$Status": "The HTTP status of the request.
", "CreateNamespaceResponse$Status": "The HTTP status of the request.
", + "CreateRefreshScheduleResponse$Status": "The HTTP status of the request.
", "CreateTemplateAliasResponse$Status": "The HTTP status of the request.
", "CreateTemplateResponse$Status": "The HTTP status of the request.
", "CreateThemeAliasResponse$Status": "The HTTP status of the request.
", @@ -7969,6 +8165,7 @@ "DeleteAccountSubscriptionResponse$Status": "The HTTP status of the request.
", "DeleteAnalysisResponse$Status": "The HTTP status of the request.
", "DeleteDashboardResponse$Status": "The HTTP status of the request.
", + "DeleteDataSetRefreshPropertiesResponse$Status": "The HTTP status of the request.
", "DeleteDataSetResponse$Status": "The HTTP status of the request.
", "DeleteDataSourceResponse$Status": "The HTTP status of the request.
", "DeleteFolderMembershipResponse$Status": "The HTTP status of the request.
", @@ -7977,6 +8174,7 @@ "DeleteGroupResponse$Status": "The HTTP status of the request.
", "DeleteIAMPolicyAssignmentResponse$Status": "The HTTP status of the request.
", "DeleteNamespaceResponse$Status": "The HTTP status of the request.
", + "DeleteRefreshScheduleResponse$Status": "The HTTP status of the request.
", "DeleteTemplateAliasResponse$Status": "The HTTP status of the request.
", "DeleteTemplateResponse$Status": "The HTTP status of the request.
", "DeleteThemeAliasResponse$Status": "The HTTP status of the request.
", @@ -7993,6 +8191,7 @@ "DescribeDashboardPermissionsResponse$Status": "The HTTP status of the request.
", "DescribeDashboardResponse$Status": "The HTTP status of this request.
", "DescribeDataSetPermissionsResponse$Status": "The HTTP status of the request.
", + "DescribeDataSetRefreshPropertiesResponse$Status": "The HTTP status of the request.
", "DescribeDataSetResponse$Status": "The HTTP status of the request.
", "DescribeDataSourcePermissionsResponse$Status": "The HTTP status of the request.
", "DescribeDataSourceResponse$Status": "The HTTP status of the request.
", @@ -8005,6 +8204,7 @@ "DescribeIngestionResponse$Status": "The HTTP status of the request.
", "DescribeIpRestrictionResponse$Status": "The HTTP status of the request.
", "DescribeNamespaceResponse$Status": "The HTTP status of the request.
", + "DescribeRefreshScheduleResponse$Status": "The HTTP status of the request.
", "DescribeTemplateAliasResponse$Status": "The HTTP status of the request.
", "DescribeTemplateDefinitionResponse$Status": "The HTTP status of the request.
", "DescribeTemplatePermissionsResponse$Status": "The HTTP status of the request.
", @@ -8030,6 +8230,7 @@ "ListIAMPolicyAssignmentsResponse$Status": "The HTTP status of the request.
", "ListIngestionsResponse$Status": "The HTTP status of the request.
", "ListNamespacesResponse$Status": "The HTTP status of the request.
", + "ListRefreshSchedulesResponse$Status": "The HTTP status of the request.
", "ListTagsForResourceResponse$Status": "The HTTP status of the request.
", "ListTemplateAliasesResponse$Status": "The HTTP status of the request.
", "ListTemplateVersionsResponse$Status": "The HTTP status of the request.
", @@ -8039,6 +8240,7 @@ "ListThemesResponse$Status": "The HTTP status of the request.
", "ListUserGroupsResponse$Status": "The HTTP status of the request.
", "ListUsersResponse$Status": "The HTTP status of the request.
", + "PutDataSetRefreshPropertiesResponse$Status": "The HTTP status of the request.
", "RegisterUserResponse$Status": "The HTTP status of the request.
", "RestoreAnalysisResponse$Status": "The HTTP status of the request.
", "SearchAnalysesResponse$Status": "The HTTP status of the request.
", @@ -8066,6 +8268,7 @@ "UpdateIAMPolicyAssignmentResponse$Status": "The HTTP status of the request.
", "UpdateIpRestrictionResponse$Status": "The HTTP status of the request.
", "UpdatePublicSharingSettingsResponse$Status": "The HTTP status of the request.
", + "UpdateRefreshScheduleResponse$Status": "The HTTP status of the request.
", "UpdateTemplateAliasResponse$Status": "The HTTP status of the request.
", "UpdateTemplatePermissionsResponse$Status": "The HTTP status of the request.
", "UpdateTemplateResponse$Status": "The HTTP status of the request.
", @@ -8128,6 +8331,8 @@ "CreateIngestionResponse$RequestId": "The Amazon Web Services request ID for this operation.
", "CreateNamespaceResponse$CapacityRegion": "The Amazon Web Services Region; that you want to use for the free SPICE capacity for the new namespace. This is set to the region that you run CreateNamespace in.
", "CreateNamespaceResponse$RequestId": "The Amazon Web Services request ID for this operation.
", + "CreateRefreshScheduleResponse$RequestId": "The Amazon Web Services request ID for this operation.
", + "CreateRefreshScheduleResponse$ScheduleId": "The ID of the refresh schedule.
", "CreateTemplateAliasResponse$RequestId": "The Amazon Web Services request ID for this operation.
", "CreateTemplateResponse$RequestId": "The Amazon Web Services request ID for this operation.
", "CreateThemeAliasResponse$RequestId": "The Amazon Web Services request ID for this operation.
", @@ -8141,6 +8346,7 @@ "DeleteAccountSubscriptionResponse$RequestId": "The Amazon Web Services request ID for this operation.
", "DeleteAnalysisResponse$RequestId": "The Amazon Web Services request ID for this operation.
", "DeleteDashboardResponse$RequestId": "The Amazon Web Services request ID for this operation.
", + "DeleteDataSetRefreshPropertiesResponse$RequestId": "The Amazon Web Services request ID for this operation.
", "DeleteDataSetResponse$RequestId": "The Amazon Web Services request ID for this operation.
", "DeleteDataSourceResponse$RequestId": "The Amazon Web Services request ID for this operation.
", "DeleteFolderMembershipResponse$RequestId": "The Amazon Web Services request ID for this operation.
", @@ -8149,6 +8355,9 @@ "DeleteGroupResponse$RequestId": "The Amazon Web Services request ID for this operation.
", "DeleteIAMPolicyAssignmentResponse$RequestId": "The Amazon Web Services request ID for this operation.
", "DeleteNamespaceResponse$RequestId": "The Amazon Web Services request ID for this operation.
", + "DeleteRefreshScheduleRequest$ScheduleId": "The ID of the refresh schedule.
", + "DeleteRefreshScheduleResponse$RequestId": "The Amazon Web Services request ID for this operation.
", + "DeleteRefreshScheduleResponse$ScheduleId": "The ID of the refresh schedule.
", "DeleteTemplateAliasResponse$RequestId": "The Amazon Web Services request ID for this operation.
", "DeleteTemplateResponse$RequestId": "The Amazon Web Services request ID for this operation.
", "DeleteThemeAliasResponse$RequestId": "The Amazon Web Services request ID for this operation.
", @@ -8166,6 +8375,7 @@ "DescribeDashboardPermissionsResponse$RequestId": "The Amazon Web Services request ID for this operation.
", "DescribeDashboardResponse$RequestId": "The Amazon Web Services request ID for this operation.
", "DescribeDataSetPermissionsResponse$RequestId": "The Amazon Web Services request ID for this operation.
", + "DescribeDataSetRefreshPropertiesResponse$RequestId": "The Amazon Web Services request ID for this operation.
", "DescribeDataSetResponse$RequestId": "The Amazon Web Services request ID for this operation.
", "DescribeDataSourcePermissionsResponse$RequestId": "The Amazon Web Services request ID for this operation.
", "DescribeDataSourceResponse$RequestId": "The Amazon Web Services request ID for this operation.
", @@ -8179,6 +8389,8 @@ "DescribeIngestionResponse$RequestId": "The Amazon Web Services request ID for this operation.
", "DescribeIpRestrictionResponse$RequestId": "The Amazon Web Services request ID for this operation.
", "DescribeNamespaceResponse$RequestId": "The Amazon Web Services request ID for this operation.
", + "DescribeRefreshScheduleRequest$ScheduleId": "The ID of the refresh schedule.
", + "DescribeRefreshScheduleResponse$RequestId": "The Amazon Web Services request ID for this operation.
", "DescribeTemplateAliasResponse$RequestId": "The Amazon Web Services request ID for this operation.
", "DescribeTemplateDefinitionResponse$RequestId": "The Amazon Web Services request ID for this operation.
", "DescribeTemplatePermissionsResponse$RequestId": "The Amazon Web Services request ID for this operation.
", @@ -8259,6 +8471,7 @@ "ListNamespacesRequest$NextToken": "A unique pagination token that can be used in a subsequent request. You will receive a pagination token in the response body of a previous ListNameSpaces
API call if there is more data that can be returned. To receive the data, make another ListNamespaces
API call with the returned token to retrieve the next page of data. Each token is valid for 24 hours. If you try to make a ListNamespaces
API call with an expired token, you will receive an HTTP 400 InvalidNextTokenException
error.
", "ListNamespacesResponse$NextToken": "A unique pagination token that can be used in a subsequent request. Receiving NextToken
in your response indicates that there is more data that can be returned. To receive the data, make another ListNamespaces
API call with the returned token to retrieve the next page of data. Each token is valid for 24 hours. If you try to make a ListNamespaces
API call with an expired token, you will receive an HTTP 400 InvalidNextTokenException
error.
", "ListNamespacesResponse$RequestId": "The Amazon Web Services request ID for this operation.
", + "ListRefreshSchedulesResponse$RequestId": "The Amazon Web Services request ID for this operation.
", "ListTagsForResourceResponse$RequestId": "The Amazon Web Services request ID for this operation.
", "ListTemplateAliasesRequest$NextToken": "The token for the next set of results, or null if there are no more results.
", "ListTemplateAliasesResponse$RequestId": "The Amazon Web Services request ID for this operation.
", @@ -8284,6 +8497,7 @@ "ListUsersRequest$NextToken": "A pagination token that can be used in a subsequent request.
", "ListUsersResponse$NextToken": "A pagination token that can be used in a subsequent request.
", "ListUsersResponse$RequestId": "The Amazon Web Services request ID for this operation.
", + "LookbackWindow$ColumnName": "The name of the lookback window column.
", "MaximumMinimumComputation$Name": "The name of a computation.
", "MetricComparisonComputation$Name": "The name of a computation.
", "NamespaceError$Message": "The message for the error.
", @@ -8296,10 +8510,14 @@ "PreconditionNotMetException$RequestId": "The Amazon Web Services request ID for this request.
", "PrincipalList$member": null, "ProjectedColumnList$member": null, + "PutDataSetRefreshPropertiesResponse$RequestId": "The Amazon Web Services request ID for this operation.
", "QueueInfo$WaitingOnIngestion": "The ID of the queued ingestion.
", "QueueInfo$QueuedIngestion": "The ID of the ongoing ingestion. The queued ingestion is waiting for the ongoing ingestion to complete.
", "QuickSightUserNotFoundException$Message": null, "QuickSightUserNotFoundException$RequestId": "The Amazon Web Services request ID for this request.
", + "RefreshFrequency$Timezone": "The timezone that you want the refresh schedule to use. The timezone ID must match a corresponding ID found on java.util.time.getAvailableIDs()
.
", + "RefreshFrequency$TimeOfTheDay": "The time of day that you want the dataset to refresh. This value is expressed in HH:MM format. This field is not required for schedules that refresh hourly.
", + "RefreshSchedule$ScheduleId": "An identifier for the refresh schedule.
", "RegisterUserRequest$Email": "The email address of the user that you want to register.
", "RegisterUserRequest$IamArn": "The ARN of the IAM user or role that you are registering with Amazon QuickSight.
", "RegisterUserRequest$ExternalLoginFederationProviderType": "The type of supported external login provider that provides identity to let a user federate into Amazon QuickSight with an associated Identity and Access Management(IAM) role. The type of supported external login provider can be one of the following.
COGNITO
: Amazon Cognito. The provider URL is cognito-identity.amazonaws.com. When choosing the COGNITO
provider type, don’t use the \"CustomFederationProviderUrl\" parameter which is only needed when the external provider is custom.
CUSTOM_OIDC
: Custom OpenID Connect (OIDC) provider. When choosing CUSTOM_OIDC
type, use the CustomFederationProviderUrl
parameter to provide the custom OIDC provider URL.
The Amazon Web Services request ID for this operation.
", "UpdateIpRestrictionResponse$RequestId": "The Amazon Web Services request ID for this operation.
", "UpdatePublicSharingSettingsResponse$RequestId": "The Amazon Web Services request ID for this operation.
", + "UpdateRefreshScheduleResponse$RequestId": "The Amazon Web Services request ID for this operation.
", + "UpdateRefreshScheduleResponse$ScheduleId": "The ID of the refresh schedule.
", "UpdateTemplateAliasResponse$RequestId": "The Amazon Web Services request ID for this operation.
", "UpdateTemplatePermissionsResponse$RequestId": "The Amazon Web Services request ID for this operation.
", "UpdateTemplateResponse$RequestId": "The Amazon Web Services request ID for this operation.
", @@ -9116,6 +9336,7 @@ "FolderSummary$CreatedTime": "The time that the folder was created.
", "FolderSummary$LastUpdatedTime": "The time that the folder was last updated.
", "Ingestion$CreatedTime": "The time that this ingestion started.
", + "RefreshSchedule$StartAfterDateTime": "Time after which the refresh schedule can be started, expressed in YYYY-MM-DDTHH:MM:SS
format.
Time when this was last updated.
", "Template$CreatedTime": "Time when this was created.
", "TemplateSummary$CreatedTime": "The last time that this template was created.
", @@ -9560,6 +9781,16 @@ "refs": { } }, + "UpdateRefreshScheduleRequest": { + "base": null, + "refs": { + } + }, + "UpdateRefreshScheduleResponse": { + "base": null, + "refs": { + } + }, "UpdateResourcePermissionList": { "base": null, "refs": { diff --git a/models/apis/quicksight/2018-04-01/endpoint-tests-1.json b/models/apis/quicksight/2018-04-01/endpoint-tests-1.json index 1a754c4abba..726ca1da133 100644 --- a/models/apis/quicksight/2018-04-01/endpoint-tests-1.json +++ b/models/apis/quicksight/2018-04-01/endpoint-tests-1.json @@ -9,8 +9,8 @@ }, "params": { "Region": "ap-northeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -22,8 +22,8 @@ }, "params": { "Region": "ap-northeast-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -35,8 +35,8 @@ }, "params": { "Region": "ap-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -48,8 +48,8 @@ }, "params": { "Region": "ap-southeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -61,8 +61,8 @@ }, "params": { "Region": "ap-southeast-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -74,8 +74,8 @@ }, "params": { "Region": "api", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -87,8 +87,8 @@ }, "params": { "Region": "ca-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -100,8 +100,8 @@ }, "params": { "Region": "eu-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -113,8 +113,8 @@ }, "params": { "Region": "eu-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -126,8 +126,8 @@ }, "params": { "Region": "eu-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -139,8 +139,8 @@ }, "params": { "Region": "sa-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -152,8 +152,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -165,8 +165,8 @@ }, "params": { "Region": "us-east-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -178,8 +178,8 @@ }, "params": { "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -191,8 +191,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -204,8 +204,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -217,8 +217,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -230,8 +230,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -243,8 +243,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -256,8 +256,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": 
false, + "UseDualStack": true } }, { @@ -269,8 +269,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -282,8 +282,8 @@ }, "params": { "Region": "api", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -295,8 +295,8 @@ }, "params": { "Region": "us-gov-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -308,8 +308,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -321,8 +321,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -334,8 +334,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -347,8 +347,19 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -360,8 +371,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -373,8 +395,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -386,8 +419,19 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -399,8 +443,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -412,8 +456,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -425,8 +469,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -437,8 +481,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -449,10 +493,16 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, 
+ { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/models/apis/redshift-data/2019-12-20/docs-2.json b/models/apis/redshift-data/2019-12-20/docs-2.json index 0d87e3173c7..288ef238ecf 100644 --- a/models/apis/redshift-data/2019-12-20/docs-2.json +++ b/models/apis/redshift-data/2019-12-20/docs-2.json @@ -2,16 +2,16 @@ "version": "2.0", "service": "You can use the Amazon Redshift Data API to run queries on Amazon Redshift tables. You can run SQL statements, which are committed if the statement succeeds.
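The endpoint test cases above exercise the `UseFIPS`, `UseDualStack`, and custom `Endpoint` resolver parameters, including the combinations the rule set rejects. A minimal sketch of how those options map onto an aws-sdk-go v1 configuration follows; the region value is illustrative and not taken from the tests.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/endpoints"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// FIPS and dual-stack endpoints are requested through aws.Config;
	// combining either of them with a custom Endpoint is the invalid
	// configuration the rule set reports as an error.
	sess, err := session.NewSession(&aws.Config{
		Region:               aws.String("us-east-1"), // illustrative
		UseFIPSEndpoint:      endpoints.FIPSEndpointStateEnabled,
		UseDualStackEndpoint: endpoints.DualStackEndpointStateDisabled,
	})
	if err != nil {
		fmt.Println("failed to create session:", err)
		return
	}
	fmt.Println("session configured for region:", aws.StringValue(sess.Config.Region))
}
```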
For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.
", "operations": { - "BatchExecuteStatement": "Runs one or more SQL statements, which can be data manipulation language (DML) or data definition language (DDL). Depending on the authorization method, use one of the following combinations of request parameters:
Secrets Manager - when connecting to a cluster, specify the Amazon Resource Name (ARN) of the secret, the database name, and the cluster identifier that matches the cluster in the secret. When connecting to a serverless workgroup, specify the Amazon Resource Name (ARN) of the secret and the database name.
Temporary credentials - when connecting to a cluster, specify the cluster identifier, the database name, and the database user name. Also, permission to call the redshift:GetClusterCredentials
operation is required. When connecting to a serverless workgroup, specify the workgroup name and database name. Also, permission to call the redshift-serverless:GetCredentials
operation is required.
For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.
", + "BatchExecuteStatement": "Runs one or more SQL statements, which can be data manipulation language (DML) or data definition language (DDL). Depending on the authorization method, use one of the following combinations of request parameters:
Secrets Manager - when connecting to a cluster, provide the secret-arn
of a secret stored in Secrets Manager which has username
and password
. The specified secret contains credentials to connect to the database
you specify. When you are connecting to a cluster, you also supply the database name. If you provide a cluster identifier (dbClusterIdentifier
), it must match the cluster identifier stored in the secret. When you are connecting to a serverless workgroup, you also supply the database name.
Temporary credentials - when connecting to your data warehouse, choose one of the following options:
When connecting to a serverless workgroup, specify the workgroup name and database name. The database user name is derived from the IAM identity. For example, arn:iam::123456789012:user:foo
has the database user name IAM:foo
. Also, permission to call the redshift-serverless:GetCredentials
operation is required.
When connecting to a cluster as an IAM identity, specify the cluster identifier and the database name. The database user name is derived from the IAM identity. For example, arn:iam::123456789012:user:foo
has the database user name IAM:foo
. Also, permission to call the redshift:GetClusterCredentialsWithIAM
operation is required.
When connecting to a cluster as a database user, specify the cluster identifier, the database name, and the database user name. Also, permission to call the redshift:GetClusterCredentials
operation is required.
For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.
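As a sketch of the Secrets Manager path described above for BatchExecuteStatement, using the v1 redshiftdataapiservice client; the cluster identifier, database name, secret ARN, and SQL text are placeholders.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/redshiftdataapiservice"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := redshiftdataapiservice.New(sess)

	// Secrets Manager authorization: the secret holds the username and
	// password; ClusterIdentifier must match the cluster stored in the secret.
	out, err := svc.BatchExecuteStatement(&redshiftdataapiservice.BatchExecuteStatementInput{
		ClusterIdentifier: aws.String("my-cluster"), // placeholder
		Database:          aws.String("dev"),        // placeholder
		SecretArn:         aws.String("arn:aws:secretsmanager:us-east-1:123456789012:secret:example"), // placeholder
		Sqls: aws.StringSlice([]string{
			"create table if not exists demo (id int)",
			"insert into demo values (1)",
		}),
	})
	if err != nil {
		fmt.Println("BatchExecuteStatement failed:", err)
		return
	}
	fmt.Println("statement id:", aws.StringValue(out.Id))
}
```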
", "CancelStatement": "Cancels a running query. To be canceled, a query must be running.
For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.
", "DescribeStatement": "Describes the details about a specific instance when a query was run by the Amazon Redshift Data API. The information includes when the query started, when it finished, the query status, the number of rows returned, and the SQL statement.
For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.
", - "DescribeTable": "Describes the detailed information about a table from metadata in the cluster. The information includes its columns. A token is returned to page through the column list. Depending on the authorization method, use one of the following combinations of request parameters:
Secrets Manager - when connecting to a cluster, specify the Amazon Resource Name (ARN) of the secret, the database name, and the cluster identifier that matches the cluster in the secret. When connecting to a serverless workgroup, specify the Amazon Resource Name (ARN) of the secret and the database name.
Temporary credentials - when connecting to a cluster, specify the cluster identifier, the database name, and the database user name. Also, permission to call the redshift:GetClusterCredentials
operation is required. When connecting to a serverless workgroup, specify the workgroup name and database name. Also, permission to call the redshift-serverless:GetCredentials
operation is required.
For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.
", - "ExecuteStatement": "Runs an SQL statement, which can be data manipulation language (DML) or data definition language (DDL). This statement must be a single SQL statement. Depending on the authorization method, use one of the following combinations of request parameters:
Secrets Manager - when connecting to a cluster, specify the Amazon Resource Name (ARN) of the secret, the database name, and the cluster identifier that matches the cluster in the secret. When connecting to a serverless workgroup, specify the Amazon Resource Name (ARN) of the secret and the database name.
Temporary credentials - when connecting to a cluster, specify the cluster identifier, the database name, and the database user name. Also, permission to call the redshift:GetClusterCredentials
operation is required. When connecting to a serverless workgroup, specify the workgroup name and database name. Also, permission to call the redshift-serverless:GetCredentials
operation is required.
For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.
", + "DescribeTable": "Describes the detailed information about a table from metadata in the cluster. The information includes its columns. A token is returned to page through the column list. Depending on the authorization method, use one of the following combinations of request parameters:
Secrets Manager - when connecting to a cluster, provide the secret-arn
of a secret stored in Secrets Manager which has username
and password
. The specified secret contains credentials to connect to the database
you specify. When you are connecting to a cluster, you also supply the database name. If you provide a cluster identifier (dbClusterIdentifier
), it must match the cluster identifier stored in the secret. When you are connecting to a serverless workgroup, you also supply the database name.
Temporary credentials - when connecting to your data warehouse, choose one of the following options:
When connecting to a serverless workgroup, specify the workgroup name and database name. The database user name is derived from the IAM identity. For example, arn:iam::123456789012:user:foo
has the database user name IAM:foo
. Also, permission to call the redshift-serverless:GetCredentials
operation is required.
When connecting to a cluster as an IAM identity, specify the cluster identifier and the database name. The database user name is derived from the IAM identity. For example, arn:iam::123456789012:user:foo
has the database user name IAM:foo
. Also, permission to call the redshift:GetClusterCredentialsWithIAM
operation is required.
When connecting to a cluster as a database user, specify the cluster identifier, the database name, and the database user name. Also, permission to call the redshift:GetClusterCredentials
operation is required.
For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.
", + "ExecuteStatement": "Runs an SQL statement, which can be data manipulation language (DML) or data definition language (DDL). This statement must be a single SQL statement. Depending on the authorization method, use one of the following combinations of request parameters:
Secrets Manager - when connecting to a cluster, provide the secret-arn
of a secret stored in Secrets Manager which has username
and password
. The specified secret contains credentials to connect to the database
you specify. When you are connecting to a cluster, you also supply the database name. If you provide a cluster identifier (dbClusterIdentifier
), it must match the cluster identifier stored in the secret. When you are connecting to a serverless workgroup, you also supply the database name.
Temporary credentials - when connecting to your data warehouse, choose one of the following options:
When connecting to a serverless workgroup, specify the workgroup name and database name. The database user name is derived from the IAM identity. For example, arn:iam::123456789012:user:foo
has the database user name IAM:foo
. Also, permission to call the redshift-serverless:GetCredentials
operation is required.
When connecting to a cluster as an IAM identity, specify the cluster identifier and the database name. The database user name is derived from the IAM identity. For example, arn:iam::123456789012:user:foo
has the database user name IAM:foo
. Also, permission to call the redshift:GetClusterCredentialsWithIAM
operation is required.
When connecting to a cluster as a database user, specify the cluster identifier, the database name, and the database user name. Also, permission to call the redshift:GetClusterCredentials
operation is required.
For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.
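The temporary-credentials options above differ only in which identifiers are passed to ExecuteStatement. A hedged sketch of the two cluster variants; identifiers are placeholders, and the redshift:GetClusterCredentialsWithIAM / redshift:GetClusterCredentials permissions noted above must already be granted.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/redshiftdataapiservice"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := redshiftdataapiservice.New(sess)

	// As an IAM identity: no DbUser; the database user name is derived from
	// the caller (requires redshift:GetClusterCredentialsWithIAM).
	asIAM := &redshiftdataapiservice.ExecuteStatementInput{
		ClusterIdentifier: aws.String("my-cluster"), // placeholder
		Database:          aws.String("dev"),        // placeholder
		Sql:               aws.String("select 1"),
	}

	// As a named database user: DbUser is required
	// (requires redshift:GetClusterCredentials).
	asDBUser := &redshiftdataapiservice.ExecuteStatementInput{
		ClusterIdentifier: aws.String("my-cluster"), // placeholder
		Database:          aws.String("dev"),        // placeholder
		DbUser:            aws.String("awsuser"),    // placeholder
		Sql:               aws.String("select 1"),
	}

	for _, in := range []*redshiftdataapiservice.ExecuteStatementInput{asIAM, asDBUser} {
		out, err := svc.ExecuteStatement(in)
		if err != nil {
			fmt.Println("ExecuteStatement failed:", err)
			continue
		}
		fmt.Println("statement id:", aws.StringValue(out.Id))
	}
}
```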
", "GetStatementResult": "Fetches the temporarily cached result of an SQL statement. A token is returned to page through the statement results.
For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.
", - "ListDatabases": "List the databases in a cluster. A token is returned to page through the database list. Depending on the authorization method, use one of the following combinations of request parameters:
Secrets Manager - when connecting to a cluster, specify the Amazon Resource Name (ARN) of the secret, the database name, and the cluster identifier that matches the cluster in the secret. When connecting to a serverless workgroup, specify the Amazon Resource Name (ARN) of the secret and the database name.
Temporary credentials - when connecting to a cluster, specify the cluster identifier, the database name, and the database user name. Also, permission to call the redshift:GetClusterCredentials
operation is required. When connecting to a serverless workgroup, specify the workgroup name and database name. Also, permission to call the redshift-serverless:GetCredentials
operation is required.
For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.
", - "ListSchemas": "Lists the schemas in a database. A token is returned to page through the schema list. Depending on the authorization method, use one of the following combinations of request parameters:
Secrets Manager - when connecting to a cluster, specify the Amazon Resource Name (ARN) of the secret, the database name, and the cluster identifier that matches the cluster in the secret. When connecting to a serverless workgroup, specify the Amazon Resource Name (ARN) of the secret and the database name.
Temporary credentials - when connecting to a cluster, specify the cluster identifier, the database name, and the database user name. Also, permission to call the redshift:GetClusterCredentials
operation is required. When connecting to a serverless workgroup, specify the workgroup name and database name. Also, permission to call the redshift-serverless:GetCredentials
operation is required.
For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.
", + "ListDatabases": "List the databases in a cluster. A token is returned to page through the database list. Depending on the authorization method, use one of the following combinations of request parameters:
Secrets Manager - when connecting to a cluster, provide the secret-arn
of a secret stored in Secrets Manager which has username
and password
. The specified secret contains credentials to connect to the database
you specify. When you are connecting to a cluster, you also supply the database name. If you provide a cluster identifier (dbClusterIdentifier
), it must match the cluster identifier stored in the secret. When you are connecting to a serverless workgroup, you also supply the database name.
Temporary credentials - when connecting to your data warehouse, choose one of the following options:
When connecting to a serverless workgroup, specify the workgroup name and database name. The database user name is derived from the IAM identity. For example, arn:iam::123456789012:user:foo
has the database user name IAM:foo
. Also, permission to call the redshift-serverless:GetCredentials
operation is required.
When connecting to a cluster as an IAM identity, specify the cluster identifier and the database name. The database user name is derived from the IAM identity. For example, arn:iam::123456789012:user:foo
has the database user name IAM:foo
. Also, permission to call the redshift:GetClusterCredentialsWithIAM
operation is required.
When connecting to a cluster as a database user, specify the cluster identifier, the database name, and the database user name. Also, permission to call the redshift:GetClusterCredentials
operation is required.
For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.
", + "ListSchemas": "Lists the schemas in a database. A token is returned to page through the schema list. Depending on the authorization method, use one of the following combinations of request parameters:
Secrets Manager - when connecting to a cluster, provide the secret-arn
of a secret stored in Secrets Manager which has username
and password
. The specified secret contains credentials to connect to the database
you specify. When you are connecting to a cluster, you also supply the database name. If you provide a cluster identifier (dbClusterIdentifier
), it must match the cluster identifier stored in the secret. When you are connecting to a serverless workgroup, you also supply the database name.
Temporary credentials - when connecting to your data warehouse, choose one of the following options:
When connecting to a serverless workgroup, specify the workgroup name and database name. The database user name is derived from the IAM identity. For example, arn:iam::123456789012:user:foo
has the database user name IAM:foo
. Also, permission to call the redshift-serverless:GetCredentials
operation is required.
When connecting to a cluster as an IAM identity, specify the cluster identifier and the database name. The database user name is derived from the IAM identity. For example, arn:iam::123456789012:user:foo
has the database user name IAM:foo
. Also, permission to call the redshift:GetClusterCredentialsWithIAM
operation is required.
When connecting to a cluster as a database user, specify the cluster identifier, the database name, and the database user name. Also, permission to call the redshift:GetClusterCredentials
operation is required.
For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.
", "ListStatements": "List of SQL statements. By default, only finished statements are shown. A token is returned to page through the statement list.
For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.
", - "ListTables": "List the tables in a database. If neither SchemaPattern
nor TablePattern
are specified, then all tables in the database are returned. A token is returned to page through the table list. Depending on the authorization method, use one of the following combinations of request parameters:
Secrets Manager - when connecting to a cluster, specify the Amazon Resource Name (ARN) of the secret, the database name, and the cluster identifier that matches the cluster in the secret. When connecting to a serverless workgroup, specify the Amazon Resource Name (ARN) of the secret and the database name.
Temporary credentials - when connecting to a cluster, specify the cluster identifier, the database name, and the database user name. Also, permission to call the redshift:GetClusterCredentials
operation is required. When connecting to a serverless workgroup, specify the workgroup name and database name. Also, permission to call the redshift-serverless:GetCredentials
operation is required.
For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.
" + "ListTables": "List the tables in a database. If neither SchemaPattern
nor TablePattern
are specified, then all tables in the database are returned. A token is returned to page through the table list. Depending on the authorization method, use one of the following combinations of request parameters:
Secrets Manager - when connecting to a cluster, provide the secret-arn
of a secret stored in Secrets Manager which has username
and password
. The specified secret contains credentials to connect to the database
you specify. When you are connecting to a cluster, you also supply the database name. If you provide a cluster identifier (dbClusterIdentifier
), it must match the cluster identifier stored in the secret. When you are connecting to a serverless workgroup, you also supply the database name.
Temporary credentials - when connecting to your data warehouse, choose one of the following options:
When connecting to a serverless workgroup, specify the workgroup name and database name. The database user name is derived from the IAM identity. For example, arn:iam::123456789012:user:foo
has the database user name IAM:foo
. Also, permission to call the redshift-serverless:GetCredentials
operation is required.
When connecting to a cluster as an IAM identity, specify the cluster identifier and the database name. The database user name is derived from the IAM identity. For example, arn:iam::123456789012:user:foo
has the database user name IAM:foo
. Also, permission to call the redshift:GetClusterCredentialsWithIAM
operation is required.
When connecting to a cluster as a database user, specify the cluster identifier, the database name, and the database user name. Also, permission to call the redshift:GetClusterCredentials
operation is required.
For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.
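Several of the list and describe operations above page their results through NextToken. A minimal sketch of draining the ListTables pages with the v1 client; identifiers and the schema pattern are placeholders.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/redshiftdataapiservice"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := redshiftdataapiservice.New(sess)

	input := &redshiftdataapiservice.ListTablesInput{
		ClusterIdentifier: aws.String("my-cluster"), // placeholder
		Database:          aws.String("dev"),        // placeholder
		DbUser:            aws.String("awsuser"),    // placeholder
		SchemaPattern:     aws.String("public"),     // placeholder
	}

	// Follow NextToken until the table list is exhausted.
	for {
		out, err := svc.ListTables(input)
		if err != nil {
			fmt.Println("ListTables failed:", err)
			return
		}
		for _, t := range out.Tables {
			fmt.Println(aws.StringValue(t.Schema), aws.StringValue(t.Name))
		}
		if aws.StringValue(out.NextToken) == "" {
			break
		}
		input.NextToken = out.NextToken
	}
}
```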
" }, "shapes": { "ActiveStatementsExceededException": { @@ -408,7 +408,7 @@ "BatchExecuteStatementException$Message": null, "BatchExecuteStatementException$StatementId": "Statement identifier of the exception.
", "BatchExecuteStatementInput$Database": "The name of the database. This parameter is required when authenticating using either Secrets Manager or temporary credentials.
", - "BatchExecuteStatementInput$DbUser": "The database user name. This parameter is required when connecting to a cluster and authenticating using temporary credentials.
", + "BatchExecuteStatementInput$DbUser": "The database user name. This parameter is required when connecting to a cluster as a database user and authenticating using temporary credentials.
", "BatchExecuteStatementOutput$Database": "The name of the database.
", "BatchExecuteStatementOutput$DbUser": "The database user name.
", "ColumnMetadata$columnDefault": "The default value of the column.
", @@ -425,7 +425,7 @@ "DescribeStatementResponse$Error": "The error message from the cluster if the SQL statement encountered an error while running.
", "DescribeTableRequest$ConnectedDatabase": "A database name. The connected database is specified when you connect with your authentication credentials.
", "DescribeTableRequest$Database": "The name of the database that contains the tables to be described. If ConnectedDatabase
is not specified, this is also the database to connect to with your authentication credentials.
", - "DescribeTableRequest$DbUser": "The database user name. This parameter is required when connecting to a cluster and authenticating using temporary credentials.
", + "DescribeTableRequest$DbUser": "The database user name. This parameter is required when connecting to a cluster as a database user and authenticating using temporary credentials.
", "DescribeTableRequest$NextToken": "A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned NextToken value in the next NextToken parameter and retrying the command. If the NextToken field is empty, all response records have been retrieved for the request.
", "DescribeTableRequest$Schema": "The schema that contains the table. If no schema is specified, then matching tables for all schemas are returned.
", "DescribeTableRequest$Table": "The table name. If no table is specified, then all tables for all matching schemas are returned. If no table and no schema is specified, then all tables for all schemas in the database are returned
", @@ -434,7 +434,7 @@ "ExecuteStatementException$Message": "The exception message.
", "ExecuteStatementException$StatementId": "Statement identifier of the exception.
", "ExecuteStatementInput$Database": "The name of the database. This parameter is required when authenticating using either Secrets Manager or temporary credentials.
", - "ExecuteStatementInput$DbUser": "The database user name. This parameter is required when connecting to a cluster and authenticating using temporary credentials.
", + "ExecuteStatementInput$DbUser": "The database user name. This parameter is required when connecting to a cluster as a database user and authenticating using temporary credentials.
", "ExecuteStatementOutput$Database": "The name of the database.
", "ExecuteStatementOutput$DbUser": "The database user name.
", "Field$stringValue": "A value of the string data type.
", @@ -442,12 +442,12 @@ "GetStatementResultResponse$NextToken": "A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned NextToken value in the next NextToken parameter and retrying the command. If the NextToken field is empty, all response records have been retrieved for the request.
", "InternalServerException$Message": "The exception message.
", "ListDatabasesRequest$Database": "The name of the database. This parameter is required when authenticating using either Secrets Manager or temporary credentials.
", - "ListDatabasesRequest$DbUser": "The database user name. This parameter is required when connecting to a cluster and authenticating using temporary credentials.
", + "ListDatabasesRequest$DbUser": "The database user name. This parameter is required when connecting to a cluster as a database user and authenticating using temporary credentials.
", "ListDatabasesRequest$NextToken": "A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned NextToken value in the next NextToken parameter and retrying the command. If the NextToken field is empty, all response records have been retrieved for the request.
", "ListDatabasesResponse$NextToken": "A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned NextToken value in the next NextToken parameter and retrying the command. If the NextToken field is empty, all response records have been retrieved for the request.
", "ListSchemasRequest$ConnectedDatabase": "A database name. The connected database is specified when you connect with your authentication credentials.
", "ListSchemasRequest$Database": "The name of the database that contains the schemas to list. If ConnectedDatabase
is not specified, this is also the database to connect to with your authentication credentials.
", - "ListSchemasRequest$DbUser": "The database user name. This parameter is required when connecting to a cluster and authenticating using temporary credentials.
", + "ListSchemasRequest$DbUser": "The database user name. This parameter is required when connecting to a cluster as a database user and authenticating using temporary credentials.
", "ListSchemasRequest$NextToken": "A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned NextToken value in the next NextToken parameter and retrying the command. If the NextToken field is empty, all response records have been retrieved for the request.
", "ListSchemasRequest$SchemaPattern": "A pattern to filter results by schema name. Within a schema pattern, \"%\" means match any substring of 0 or more characters and \"_\" means match any one character. Only schema name entries matching the search pattern are returned.
", "ListSchemasResponse$NextToken": "A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned NextToken value in the next NextToken parameter and retrying the command. If the NextToken field is empty, all response records have been retrieved for the request.
", @@ -455,7 +455,7 @@ "ListStatementsResponse$NextToken": "A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned NextToken value in the next NextToken parameter and retrying the command. If the NextToken field is empty, all response records have been retrieved for the request.
", "ListTablesRequest$ConnectedDatabase": "A database name. The connected database is specified when you connect with your authentication credentials.
", "ListTablesRequest$Database": "The name of the database that contains the tables to list. If ConnectedDatabase
is not specified, this is also the database to connect to with your authentication credentials.
", - "ListTablesRequest$DbUser": "The database user name. This parameter is required when connecting to a cluster and authenticating using temporary credentials.
", + "ListTablesRequest$DbUser": "The database user name. This parameter is required when connecting to a cluster as a database user and authenticating using temporary credentials.
", "ListTablesRequest$NextToken": "A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned NextToken value in the next NextToken parameter and retrying the command. If the NextToken field is empty, all response records have been retrieved for the request.
", "ListTablesRequest$SchemaPattern": "A pattern to filter results by schema name. Within a schema pattern, \"%\" means match any substring of 0 or more characters and \"_\" means match any one character. Only schema name entries matching the search pattern are returned. If SchemaPattern
is not specified, then all tables that match TablePattern
are returned. If neither SchemaPattern
or TablePattern
are specified, then all tables are returned.
", "ListTablesRequest$TablePattern": "A pattern to filter results by table name. Within a table pattern, \"%\" means match any substring of 0 or more characters and \"_\" means match any one character. Only table name entries matching the search pattern are returned. If TablePattern
is not specified, then all tables that match SchemaPattern
are returned. If neither SchemaPattern
or TablePattern
are specified, then all tables are returned.
diff --git a/models/apis/servicecatalog/2015-12-10/docs-2.json b/models/apis/servicecatalog/2015-12-10/docs-2.json
The type of provisioning artifact.
CLOUD_FORMATION_TEMPLATE
- CloudFormation template
MARKETPLACE_AMI
- Amazon Web Services Marketplace AMI
MARKETPLACE_CAR
- Amazon Web Services Marketplace Clusters and Amazon Web Services Resources
The type of provisioning artifact.
CLOUD_FORMATION_TEMPLATE
- CloudFormation template
MARKETPLACE_AMI
- Amazon Web Services Marketplace AMI
MARKETPLACE_CAR
- Amazon Web Services Marketplace Clusters and Amazon Web Services Resources
The type of provisioning artifact.
CLOUD_FORMATION_TEMPLATE
- CloudFormation template
MARKETPLACE_AMI
- Amazon Web Services Marketplace AMI
MARKETPLACE_CAR
- Amazon Web Services Marketplace Clusters and Amazon Web Services Resources
TERRAFORM_OPEN_SOURCE
- Terraform open source configuration file
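The descriptions above enumerate the provisioning artifact Type values. In the v1 servicecatalog package these surface as generated string constants; the constant names below follow the SDK's enum naming convention and are an assumption, not taken from this diff.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/servicecatalog"
)

func main() {
	// Generated constants for the ProvisioningArtifactType enum
	// (names assumed from the SDK's enum naming convention).
	types := []string{
		servicecatalog.ProvisioningArtifactTypeCloudFormationTemplate,
		servicecatalog.ProvisioningArtifactTypeMarketplaceAmi,
		servicecatalog.ProvisioningArtifactTypeMarketplaceCar,
		servicecatalog.ProvisioningArtifactTypeTerraformOpenSource,
	}
	for _, t := range types {
		fmt.Println(t)
	}
}
```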