diff --git a/CHANGELOG.md b/CHANGELOG.md index 0bec9930189..ba5ffe3d217 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,35 @@ +Release v1.35.35 (2020-11-24) +=== + +### Service Client Updates +* `service/appflow`: Updates service API and documentation +* `service/batch`: Updates service API and documentation + * Add Ec2Configuration in ComputeEnvironment.ComputeResources. Use in CreateComputeEnvironment API to enable AmazonLinux2 support. +* `service/cloudformation`: Updates service API and documentation + * Adds support for the new Modules feature for CloudFormation. A module encapsulates one or more resources and their respective configurations for reuse across your organization. +* `service/cloudtrail`: Updates service API and documentation + * CloudTrail now includes advanced event selectors, which give you finer-grained control over the events that are logged to your trail. +* `service/codebuild`: Updates service API and documentation + * Adding GetReportGroupTrend API for Test Reports. +* `service/cognito-idp`: Updates service API and documentation +* `service/comprehend`: Updates service API, documentation, and paginators +* `service/elasticbeanstalk`: Updates service API and documentation + * Updates the Integer constraint of DescribeEnvironmentManagedActionHistory's MaxItems parameter to [1, 100]. +* `service/fsx`: Updates service API and documentation +* `service/gamelift`: Updates service API and documentation + * GameLift FlexMatch is now available as a standalone matchmaking solution. FlexMatch now provides customizable matchmaking for games hosted peer-to-peer, on-premises, or on cloud compute primitives. +* `service/iotsitewise`: Updates service API and documentation +* `service/lex-models`: Updates service API +* `service/mediaconvert`: Updates service API and documentation + * AWS Elemental MediaConvert SDK has added support for Vorbis and Opus audio in OGG/OGA containers. 
+* `service/mwaa`: Adds new service +* `service/quicksight`: Updates service API and documentation + * Support for embedding without user registration. New enum EmbeddingIdentityType. A potential breaking change. Affects code that refers IdentityType enum type directly instead of literal string value. +* `service/states`: Updates service API and documentation + * This release of the AWS Step Functions SDK introduces support for Synchronous Express Workflows +* `service/timestream-write`: Updates service API and documentation +* `service/transcribe-streaming`: Updates service API and documentation + Release v1.35.34 (2020-11-23) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index e300d11724c..21b8010fa84 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -5676,6 +5676,7 @@ var awsPartition = partition{ "snowball": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -7339,6 +7340,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "ram": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "rds": service{ Endpoints: endpoints{ diff --git a/aws/version.go b/aws/version.go index cb5d91e2533..3bfb31c995e 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.35.34" +const SDKVersion = "1.35.35" diff --git a/models/apis/appflow/2020-08-23/api-2.json b/models/apis/appflow/2020-08-23/api-2.json index 652e7ec030e..545b4cf027b 100644 --- a/models/apis/appflow/2020-08-23/api-2.json +++ b/models/apis/appflow/2020-08-23/api-2.json @@ -497,7 +497,8 @@ "Trendmicro":{"shape":"TrendmicroMetadata"}, "Veeva":{"shape":"VeevaMetadata"}, "Zendesk":{"shape":"ZendeskMetadata"}, - "EventBridge":{"shape":"EventBridgeMetadata"} + 
"EventBridge":{"shape":"EventBridgeMetadata"}, + "Upsolver":{"shape":"UpsolverMetadata"} } }, "ConnectorOAuthRequest":{ @@ -637,7 +638,8 @@ "Infornexus", "Amplitude", "Veeva", - "EventBridge" + "EventBridge", + "Upsolver" ] }, "ConnectorTypeList":{ @@ -896,7 +898,8 @@ "S3":{"shape":"S3DestinationProperties"}, "Salesforce":{"shape":"SalesforceDestinationProperties"}, "Snowflake":{"shape":"SnowflakeDestinationProperties"}, - "EventBridge":{"shape":"EventBridgeDestinationProperties"} + "EventBridge":{"shape":"EventBridgeDestinationProperties"}, + "Upsolver":{"shape":"UpsolverDestinationProperties"} } }, "DestinationField":{ @@ -2279,6 +2282,38 @@ "max":256, "pattern":"\\S+" }, + "UpsolverBucketName":{ + "type":"string", + "max":63, + "min":16, + "pattern":"^(upsolver-appflow)\\S*" + }, + "UpsolverDestinationProperties":{ + "type":"structure", + "required":[ + "bucketName", + "s3OutputFormatConfig" + ], + "members":{ + "bucketName":{"shape":"UpsolverBucketName"}, + "bucketPrefix":{"shape":"BucketPrefix"}, + "s3OutputFormatConfig":{"shape":"UpsolverS3OutputFormatConfig"} + } + }, + "UpsolverMetadata":{ + "type":"structure", + "members":{ + } + }, + "UpsolverS3OutputFormatConfig":{ + "type":"structure", + "required":["prefixConfig"], + "members":{ + "fileType":{"shape":"FileType"}, + "prefixConfig":{"shape":"PrefixConfig"}, + "aggregationConfig":{"shape":"AggregationConfig"} + } + }, "Username":{ "type":"string", "max":512, diff --git a/models/apis/appflow/2020-08-23/docs-2.json b/models/apis/appflow/2020-08-23/docs-2.json index 887e80925b0..8c2a62126fa 100644 --- a/models/apis/appflow/2020-08-23/docs-2.json +++ b/models/apis/appflow/2020-08-23/docs-2.json @@ -56,7 +56,8 @@ "AggregationConfig": { "base": "

The aggregation settings that you can use to customize the output format of your flow data.

", "refs": { - "S3OutputFormatConfig$aggregationConfig": null + "S3OutputFormatConfig$aggregationConfig": null, + "UpsolverS3OutputFormatConfig$aggregationConfig": null } }, "AggregationType": { @@ -170,7 +171,8 @@ "S3DestinationProperties$bucketPrefix": "

The object key for the destination bucket in which Amazon AppFlow places the files.

", "S3SourceProperties$bucketPrefix": "

The object key for the Amazon S3 bucket in which the source files are stored.

", "SnowflakeConnectorProfileProperties$bucketPrefix": "

The bucket path that refers to the Amazon S3 bucket associated with Snowflake.

", - "SnowflakeDestinationProperties$bucketPrefix": "

The object key for the destination bucket in which Amazon AppFlow places the files.

" + "SnowflakeDestinationProperties$bucketPrefix": "

The object key for the destination bucket in which Amazon AppFlow places the files.

", + "UpsolverDestinationProperties$bucketPrefix": "

The object key for the destination Upsolver Amazon S3 bucket in which Amazon AppFlow places the files.

" } }, "ClientCredentialsArn": { @@ -696,7 +698,8 @@ "FileType": { "base": null, "refs": { - "S3OutputFormatConfig$fileType": "

Indicates the file type that Amazon AppFlow places in the Amazon S3 bucket.

" + "S3OutputFormatConfig$fileType": "

Indicates the file type that Amazon AppFlow places in the Amazon S3 bucket.

", + "UpsolverS3OutputFormatConfig$fileType": "

Indicates the file type that Amazon AppFlow places in the Upsolver Amazon S3 bucket.

" } }, "FilterOperatorList": { @@ -1063,7 +1066,8 @@ "PrefixConfig": { "base": "

Determines the prefix that Amazon AppFlow applies to the destination folder name. You can name your destination folders according to the flow frequency and date.

", "refs": { - "S3OutputFormatConfig$prefixConfig": "

Determines the prefix that Amazon AppFlow applies to the folder name in the Amazon S3 bucket. You can name folders according to the flow frequency and date.

" + "S3OutputFormatConfig$prefixConfig": "

Determines the prefix that Amazon AppFlow applies to the folder name in the Amazon S3 bucket. You can name folders according to the flow frequency and date.

", + "UpsolverS3OutputFormatConfig$prefixConfig": null } }, "PrefixFormat": { @@ -1220,7 +1224,7 @@ "ScheduleExpression": { "base": null, "refs": { - "ScheduledTriggerProperties$scheduleExpression": "

The scheduling expression that determines when and how often the rule runs.

" + "ScheduledTriggerProperties$scheduleExpression": "

 The scheduling expression that determines the rate at which the schedule will run, for example rate(5 minutes).

" } }, "ScheduleFrequencyType": { @@ -1613,6 +1617,30 @@ "FlowDefinition$lastUpdatedBy": "

Specifies the account user name that most recently updated the flow.

" } }, + "UpsolverBucketName": { + "base": null, + "refs": { + "UpsolverDestinationProperties$bucketName": "

The Upsolver Amazon S3 bucket name in which Amazon AppFlow places the transferred data.

" + } + }, + "UpsolverDestinationProperties": { + "base": "

The properties that are applied when Upsolver is used as a destination.

", + "refs": { + "DestinationConnectorProperties$Upsolver": "

The properties required to query Upsolver.

" + } + }, + "UpsolverMetadata": { + "base": "

The connector metadata specific to Upsolver.

", + "refs": { + "ConnectorMetadata$Upsolver": "

The connector metadata specific to Upsolver.

" + } + }, + "UpsolverS3OutputFormatConfig": { + "base": "

The configuration that determines how Amazon AppFlow formats the flow output data when Upsolver is used as the destination.

", + "refs": { + "UpsolverDestinationProperties$s3OutputFormatConfig": "

The configuration that determines how data is formatted when Upsolver is used as the flow destination.

" + } + }, "Username": { "base": null, "refs": { diff --git a/models/apis/batch/2016-08-10/api-2.json b/models/apis/batch/2016-08-10/api-2.json index 251d43afcb4..c6508f9282e 100644 --- a/models/apis/batch/2016-08-10/api-2.json +++ b/models/apis/batch/2016-08-10/api-2.json @@ -439,7 +439,11 @@ "maxvCpus":{"shape":"Integer"}, "desiredvCpus":{"shape":"Integer"}, "instanceTypes":{"shape":"StringList"}, - "imageId":{"shape":"String"}, + "imageId":{ + "shape":"String", + "deprecated":true, + "deprecatedMessage":"This field is deprecated, use ec2Configuration[].imageIdOverride instead." + }, "subnets":{"shape":"StringList"}, "securityGroupIds":{"shape":"StringList"}, "ec2KeyPair":{"shape":"String"}, @@ -448,7 +452,8 @@ "placementGroup":{"shape":"String"}, "bidPercentage":{"shape":"Integer"}, "spotIamFleetRole":{"shape":"String"}, - "launchTemplate":{"shape":"LaunchTemplateSpecification"} + "launchTemplate":{"shape":"LaunchTemplateSpecification"}, + "ec2Configuration":{"shape":"Ec2ConfigurationList"} } }, "ComputeResourceUpdate":{ @@ -699,6 +704,18 @@ "type":"list", "member":{"shape":"Device"} }, + "Ec2Configuration":{ + "type":"structure", + "required":["imageType"], + "members":{ + "imageType":{"shape":"ImageType"}, + "imageIdOverride":{"shape":"ImageIdOverride"} + } + }, + "Ec2ConfigurationList":{ + "type":"list", + "member":{"shape":"Ec2Configuration"} + }, "EnvironmentVariables":{ "type":"list", "member":{"shape":"KeyValuePair"} @@ -723,6 +740,16 @@ "sourcePath":{"shape":"String"} } }, + "ImageIdOverride":{ + "type":"string", + "max":256, + "min":1 + }, + "ImageType":{ + "type":"string", + "max":256, + "min":1 + }, "Integer":{"type":"integer"}, "JQState":{ "type":"string", diff --git a/models/apis/batch/2016-08-10/docs-2.json b/models/apis/batch/2016-08-10/docs-2.json index ea8a4b07c24..1a4fa332ca5 100644 --- a/models/apis/batch/2016-08-10/docs-2.json +++ b/models/apis/batch/2016-08-10/docs-2.json @@ -312,6 +312,18 @@ "LinuxParameters$devices": "

Any host devices to expose to the container. This parameter maps to Devices in the Create a container section of the Docker Remote API and the --device option to docker run.

" } }, + "Ec2Configuration": { + "base": "

Provides information used to select Amazon Machine Images (AMIs) for instances in the compute environment. If the Ec2Configuration is not specified, the default is ECS_AL1.

", + "refs": { + "Ec2ConfigurationList$member": null + } + }, + "Ec2ConfigurationList": { + "base": null, + "refs": { + "ComputeResource$ec2Configuration": "

 Provides additional details used to select the AMI to use for instances in a compute environment.

" + } + }, "EnvironmentVariables": { "base": null, "refs": { @@ -338,6 +350,18 @@ "Volume$host": "

The contents of the host parameter determine whether your data volume persists on the host container instance and where it is stored. If the host parameter is empty, then the Docker daemon assigns a host path for your data volume. However, the data is not guaranteed to persist after the containers associated with it stop running.

" } }, + "ImageIdOverride": { + "base": null, + "refs": { + "Ec2Configuration$imageIdOverride": "

The AMI ID used for instances launched in the compute environment that match the image type. This setting overrides the imageId set in the computeResource object.

" + } + }, + "ImageType": { + "base": null, + "refs": { + "Ec2Configuration$imageType": "

The image type to match with the instance type to pick an AMI. If the imageIdOverride parameter is not specified, then a recent Amazon ECS-optimized AMI will be used.

ECS_AL2

Amazon Linux 2− Default for all AWS Graviton-based instance families (for example, C6g, M6g, R6g, and T4g) and can be used for all non-GPU instance types.

ECS_AL2_NVIDIA

Amazon Linux 2 (GPU)−Default for all GPU instance families (for example P4 and G4) and can be used for all non-AWS Graviton-based instance types.

ECS_AL1

Amazon Linux−Default for all non-GPU, non-AWS-Graviton instance families. Amazon Linux is reaching the end-of-life of standard support. For more information, see Amazon Linux AMI.

" + } + }, "Integer": { "base": null, "refs": { @@ -725,7 +749,7 @@ "ComputeEnvironmentDetail$statusReason": "

A short, human-readable string to provide additional details about the current status of the compute environment.

", "ComputeEnvironmentDetail$serviceRole": "

The service role associated with the compute environment that allows AWS Batch to make calls to AWS API operations on your behalf.

", "ComputeEnvironmentOrder$computeEnvironment": "

The Amazon Resource Name (ARN) of the compute environment.

", - "ComputeResource$imageId": "

The Amazon Machine Image (AMI) ID used for instances launched in the compute environment.

", + "ComputeResource$imageId": "

The Amazon Machine Image (AMI) ID used for instances launched in the compute environment. This parameter is overridden by the imageIdOverride member of the Ec2Configuration structure.

", "ComputeResource$ec2KeyPair": "

The Amazon EC2 key pair that is used for instances launched in the compute environment.

", "ComputeResource$instanceRole": "

The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. You can specify the short name or full Amazon Resource Name (ARN) of an instance profile. For example, ecsInstanceRole or arn:aws:iam::<aws_account_id>:instance-profile/ecsInstanceRole . For more information, see Amazon ECS Instance Role in the AWS Batch User Guide.

", "ComputeResource$placementGroup": "

The Amazon EC2 placement group to associate with your compute resources. If you intend to submit multi-node parallel jobs to your compute environment, you should consider creating a cluster placement group and associate it with your compute resources. This keeps your multi-node parallel job on a logical grouping of instances within a single Availability Zone with high network flow potential. For more information, see Placement Groups in the Amazon EC2 User Guide for Linux Instances.

", diff --git a/models/apis/cloudformation/2010-05-15/api-2.json b/models/apis/cloudformation/2010-05-15/api-2.json index 439aa3b7c31..4afe324c00b 100644 --- a/models/apis/cloudformation/2010-05-15/api-2.json +++ b/models/apis/cloudformation/2010-05-15/api-2.json @@ -1987,6 +1987,7 @@ "Visibility":{"shape":"Visibility"}, "ProvisioningType":{"shape":"ProvisioningType"}, "DeprecatedStatus":{"shape":"DeprecatedStatus"}, + "Type":{"shape":"RegistryType"}, "MaxResults":{"shape":"MaxResults"}, "NextToken":{"shape":"NextToken"} } @@ -2015,6 +2016,7 @@ "LogGroupName":{"shape":"LogGroupName"} } }, + "LogicalIdHierarchy":{"type":"string"}, "LogicalResourceId":{"type":"string"}, "LogicalResourceIds":{ "type":"list", @@ -2037,6 +2039,13 @@ "min":1 }, "Metadata":{"type":"string"}, + "ModuleInfo":{ + "type":"structure", + "members":{ + "TypeHierarchy":{"shape":"TypeHierarchy"}, + "LogicalIdHierarchy":{"shape":"LogicalIdHierarchy"} + } + }, "MonitoringTimeInMinutes":{ "type":"integer", "max":180, @@ -2321,7 +2330,10 @@ }, "RegistryType":{ "type":"string", - "enum":["RESOURCE"] + "enum":[ + "RESOURCE", + "MODULE" + ] }, "Replacement":{ "type":"string", @@ -2366,7 +2378,8 @@ "Replacement":{"shape":"Replacement"}, "Scope":{"shape":"Scope"}, "Details":{"shape":"ResourceChangeDetails"}, - "ChangeSetId":{"shape":"ChangeSetId"} + "ChangeSetId":{"shape":"ChangeSetId"}, + "ModuleInfo":{"shape":"ModuleInfo"} } }, "ResourceChangeDetail":{ @@ -2820,7 +2833,8 @@ "ResourceStatus":{"shape":"ResourceStatus"}, "ResourceStatusReason":{"shape":"ResourceStatusReason"}, "Description":{"shape":"Description"}, - "DriftInformation":{"shape":"StackResourceDriftInformation"} + "DriftInformation":{"shape":"StackResourceDriftInformation"}, + "ModuleInfo":{"shape":"ModuleInfo"} } }, "StackResourceDetail":{ @@ -2842,7 +2856,8 @@ "ResourceStatusReason":{"shape":"ResourceStatusReason"}, "Description":{"shape":"Description"}, "Metadata":{"shape":"Metadata"}, - 
"DriftInformation":{"shape":"StackResourceDriftInformation"} + "DriftInformation":{"shape":"StackResourceDriftInformation"}, + "ModuleInfo":{"shape":"ModuleInfo"} } }, "StackResourceDrift":{ @@ -2864,7 +2879,8 @@ "ActualProperties":{"shape":"Properties"}, "PropertyDifferences":{"shape":"PropertyDifferences"}, "StackResourceDriftStatus":{"shape":"StackResourceDriftStatus"}, - "Timestamp":{"shape":"Timestamp"} + "Timestamp":{"shape":"Timestamp"}, + "ModuleInfo":{"shape":"ModuleInfo"} } }, "StackResourceDriftInformation":{ @@ -2921,7 +2937,8 @@ "LastUpdatedTimestamp":{"shape":"Timestamp"}, "ResourceStatus":{"shape":"ResourceStatus"}, "ResourceStatusReason":{"shape":"ResourceStatusReason"}, - "DriftInformation":{"shape":"StackResourceDriftInformationSummary"} + "DriftInformation":{"shape":"StackResourceDriftInformationSummary"}, + "ModuleInfo":{"shape":"ModuleInfo"} } }, "StackResources":{ @@ -3301,11 +3318,12 @@ "max":1024, "pattern":"arn:aws[A-Za-z0-9-]{0,64}:cloudformation:[A-Za-z0-9-]{1,64}:([0-9]{12})?:type/.+" }, + "TypeHierarchy":{"type":"string"}, "TypeName":{ "type":"string", - "max":196, + "max":204, "min":10, - "pattern":"[A-Za-z0-9]{2,64}::[A-Za-z0-9]{2,64}::[A-Za-z0-9]{2,64}" + "pattern":"[A-Za-z0-9]{2,64}::[A-Za-z0-9]{2,64}::[A-Za-z0-9]{2,64}(::MODULE){0,1}" }, "TypeNotFoundException":{ "type":"structure", diff --git a/models/apis/cloudformation/2010-05-15/docs-2.json b/models/apis/cloudformation/2010-05-15/docs-2.json index 90d79becc53..cd81739832b 100644 --- a/models/apis/cloudformation/2010-05-15/docs-2.json +++ b/models/apis/cloudformation/2010-05-15/docs-2.json @@ -445,7 +445,7 @@ } }, "DeploymentTargets": { - "base": "

[Service-managed permissions] The AWS Organizations accounts to which StackSets deploys. StackSets does not deploy stack instances to the organization master account, even if the master account is in your organization or in an OU in your organization.

For update operations, you can specify either Accounts or OrganizationalUnitIds. For create and delete operations, specify OrganizationalUnitIds.

", + "base": "

[Service-managed permissions] The AWS Organizations accounts to which StackSets deploys. StackSets does not deploy stack instances to the organization management account, even if the organization management account is in your organization or in an OU in your organization.

For update operations, you can specify either Accounts or OrganizationalUnitIds. For create and delete operations, specify OrganizationalUnitIds.

", "refs": { "CreateStackInstancesInput$DeploymentTargets": "

[Service-managed permissions] The AWS Organizations accounts for which to create stack instances in the specified Regions.

You can specify Accounts or DeploymentTargets, but not both.

", "DeleteStackInstancesInput$DeploymentTargets": "

[Service-managed permissions] The AWS Organizations accounts from which to delete stack instances.

You can specify Accounts or DeploymentTargets, but not both.

", @@ -1033,6 +1033,12 @@ "RegisterTypeInput$LoggingConfig": "

Specifies logging configuration information for a type.

" } }, + "LogicalIdHierarchy": { + "base": null, + "refs": { + "ModuleInfo$LogicalIdHierarchy": "

 A concatenated list of the logical IDs of the module or modules containing the resource. Modules are listed starting with the inner-most nested module, and separated by /.

In the following example, the resource was created from a module, moduleA, that is nested inside a parent module, moduleB.

moduleA/moduleB

For more information, see Referencing resources in a module in the CloudFormation User Guide.

" + } + }, "LogicalResourceId": { "base": null, "refs": { @@ -1089,6 +1095,16 @@ "StackResourceDetail$Metadata": "

The content of the Metadata attribute declared for the resource. For more information, see Metadata Attribute in the AWS CloudFormation User Guide.

" } }, + "ModuleInfo": { + "base": "

Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template.

For more information on modules, see Using modules to encapsulate and reuse resource configurations in the CloudFormation User Guide.

", + "refs": { + "ResourceChange$ModuleInfo": "

Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template.

", + "StackResource$ModuleInfo": "

Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template.

", + "StackResourceDetail$ModuleInfo": "

Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template.

", + "StackResourceDrift$ModuleInfo": "

Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template.

", + "StackResourceSummary$ModuleInfo": "

Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template.

" + } + }, "MonitoringTimeInMinutes": { "base": null, "refs": { @@ -1470,6 +1486,7 @@ "DescribeTypeOutput$Type": "

The kind of type.

Currently the only valid value is RESOURCE.

", "ListTypeRegistrationsInput$Type": "

The kind of type.

Currently the only valid value is RESOURCE.

Conditional: You must specify either TypeName and Type, or Arn.

", "ListTypeVersionsInput$Type": "

The kind of the type.

Currently the only valid value is RESOURCE.

Conditional: You must specify either TypeName and Type, or Arn.

", + "ListTypesInput$Type": "

The type of extension.

", "RegisterTypeInput$Type": "

The kind of type.

Currently, the only valid value is RESOURCE.

", "SetTypeDefaultVersionInput$Type": "

The kind of type.

Conditional: You must specify either TypeName and Type, or Arn.

", "TypeSummary$Type": "

The kind of type.

", @@ -2433,6 +2450,12 @@ "TypeVersionSummary$Arn": "

The Amazon Resource Name (ARN) of the type version.

" } }, + "TypeHierarchy": { + "base": null, + "refs": { + "ModuleInfo$TypeHierarchy": "

 A concatenated list of the module type or types containing the resource. Module types are listed starting with the inner-most nested module, and separated by /.

In the following example, the resource was created from a module of type AWS::First::Example::MODULE, that is nested inside a parent module of type AWS::Second::Example::MODULE.

AWS::First::Example::MODULE/AWS::Second::Example::MODULE

" + } + }, "TypeName": { "base": null, "refs": { diff --git a/models/apis/cloudtrail/2013-11-01/api-2.json b/models/apis/cloudtrail/2013-11-01/api-2.json index da8b8d47ae9..0335ecdba41 100644 --- a/models/apis/cloudtrail/2013-11-01/api-2.json +++ b/models/apis/cloudtrail/2013-11-01/api-2.json @@ -393,6 +393,39 @@ "members":{ } }, + "AdvancedEventSelector":{ + "type":"structure", + "required":[ + "Name", + "FieldSelectors" + ], + "members":{ + "Name":{"shape":"SelectorName"}, + "FieldSelectors":{"shape":"AdvancedFieldSelectors"} + } + }, + "AdvancedEventSelectors":{ + "type":"list", + "member":{"shape":"AdvancedEventSelector"} + }, + "AdvancedFieldSelector":{ + "type":"structure", + "required":["Field"], + "members":{ + "Field":{"shape":"SelectorField"}, + "Equals":{"shape":"Operator"}, + "StartsWith":{"shape":"Operator"}, + "EndsWith":{"shape":"Operator"}, + "NotEquals":{"shape":"Operator"}, + "NotStartsWith":{"shape":"Operator"}, + "NotEndsWith":{"shape":"Operator"} + } + }, + "AdvancedFieldSelectors":{ + "type":"list", + "member":{"shape":"AdvancedFieldSelector"}, + "min":1 + }, "Boolean":{"type":"boolean"}, "ByteBuffer":{"type":"blob"}, "CloudTrailARNInvalidException":{ @@ -546,7 +579,8 @@ "type":"structure", "members":{ "TrailARN":{"shape":"String"}, - "EventSelectors":{"shape":"EventSelectors"} + "EventSelectors":{"shape":"EventSelectors"}, + "AdvancedEventSelectors":{"shape":"AdvancedEventSelectors"} } }, "GetInsightSelectorsRequest":{ @@ -889,6 +923,17 @@ }, "exception":true }, + "Operator":{ + "type":"list", + "member":{"shape":"OperatorValue"}, + "min":1 + }, + "OperatorValue":{ + "type":"string", + "max":2048, + "min":1, + "pattern":".+" + }, "OrganizationNotInAllFeaturesModeException":{ "type":"structure", "members":{ @@ -916,20 +961,19 @@ }, "PutEventSelectorsRequest":{ "type":"structure", - "required":[ - "TrailName", - "EventSelectors" - ], + "required":["TrailName"], "members":{ "TrailName":{"shape":"String"}, - 
"EventSelectors":{"shape":"EventSelectors"} + "EventSelectors":{"shape":"EventSelectors"}, + "AdvancedEventSelectors":{"shape":"AdvancedEventSelectors"} } }, "PutEventSelectorsResponse":{ "type":"structure", "members":{ "TrailARN":{"shape":"String"}, - "EventSelectors":{"shape":"EventSelectors"} + "EventSelectors":{"shape":"EventSelectors"}, + "AdvancedEventSelectors":{"shape":"AdvancedEventSelectors"} } }, "PutInsightSelectorsRequest":{ @@ -1015,6 +1059,18 @@ }, "exception":true }, + "SelectorField":{ + "type":"string", + "max":1000, + "min":1, + "pattern":"[\\w|\\d|\\.|_]+" + }, + "SelectorName":{ + "type":"string", + "max":1000, + "min":1, + "pattern":".+" + }, "StartLoggingRequest":{ "type":"structure", "required":["Name"], diff --git a/models/apis/cloudtrail/2013-11-01/docs-2.json b/models/apis/cloudtrail/2013-11-01/docs-2.json index 548cf6702d0..b8aed4b7067 100644 --- a/models/apis/cloudtrail/2013-11-01/docs-2.json +++ b/models/apis/cloudtrail/2013-11-01/docs-2.json @@ -13,7 +13,7 @@ "ListPublicKeys": "

Returns all public keys whose private keys were used to sign the digest files within the specified time range. The public key is needed to validate digest files that were signed with its corresponding private key.

CloudTrail uses different private/public key pairs per region. Each digest file is signed with a private key unique to its region. Therefore, when you validate a digest file from a particular region, you must look in the same region for its corresponding public key.

", "ListTags": "

Lists the tags for the trail in the current region.

", "ListTrails": "

Lists trails that are in the current account.

", - "LookupEvents": "

Looks up management events or CloudTrail Insights events that are captured by CloudTrail. You can look up events that occurred in a region within the last 90 days. Lookup supports the following attributes for management events:

Lookup supports the following attributes for Insights events:

All attributes are optional. The default number of results returned is 50, with a maximum of 50 possible. The response includes a token that you can use to get the next page of results.

The rate of lookup requests is limited to two per second per account. If this limit is exceeded, a throttling error occurs.

", + "LookupEvents": "

Looks up management events or CloudTrail Insights events that are captured by CloudTrail. You can look up events that occurred in a region within the last 90 days. Lookup supports the following attributes for management events:

Lookup supports the following attributes for Insights events:

All attributes are optional. The default number of results returned is 50, with a maximum of 50 possible. The response includes a token that you can use to get the next page of results.

The rate of lookup requests is limited to two per second, per account, per region. If this limit is exceeded, a throttling error occurs.

", "PutEventSelectors": "

Configures an event selector for your trail. Use event selectors to further specify the management and data event settings for your trail. By default, trails created without specific event selectors will be configured to log all read and write management events, and no data events.

When an event occurs in your account, CloudTrail evaluates the event selectors in all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event.

Example

  1. You create an event selector for a trail and specify that you want write-only events.

  2. The EC2 GetConsoleOutput and RunInstances API operations occur in your account.

  3. CloudTrail evaluates whether the events match your event selectors.

  4. The RunInstances is a write-only event and it matches your event selector. The trail logs the event.

  5. The GetConsoleOutput is a read-only event but it doesn't match your event selector. The trail doesn't log the event.

The PutEventSelectors operation must be called from the region in which the trail was created; otherwise, an InvalidHomeRegionException is thrown.

You can configure up to five event selectors for each trail. For more information, see Logging Data and Management Events for Trails and Limits in AWS CloudTrail in the AWS CloudTrail User Guide.

", "PutInsightSelectors": "

Lets you enable Insights event logging by specifying the Insights selectors that you want to enable on an existing trail. You also use PutInsightSelectors to turn off Insights event logging, by passing an empty list of insight types. In this release, only ApiCallRateInsight is supported as an Insights selector.

", "RemoveTags": "

Removes the specified tags from a trail.

", @@ -32,6 +32,32 @@ "refs": { } }, + "AdvancedEventSelector": { + "base": null, + "refs": { + "AdvancedEventSelectors$member": null + } + }, + "AdvancedEventSelectors": { + "base": null, + "refs": { + "GetEventSelectorsResponse$AdvancedEventSelectors": null, + "PutEventSelectorsRequest$AdvancedEventSelectors": null, + "PutEventSelectorsResponse$AdvancedEventSelectors": null + } + }, + "AdvancedFieldSelector": { + "base": null, + "refs": { + "AdvancedFieldSelectors$member": null + } + }, + "AdvancedFieldSelectors": { + "base": null, + "refs": { + "AdvancedEventSelector$FieldSelectors": null + } + }, "Boolean": { "base": null, "refs": { @@ -44,7 +70,7 @@ "CreateTrailResponse$LogFileValidationEnabled": "

Specifies whether log file integrity validation is enabled.

", "CreateTrailResponse$IsOrganizationTrail": "

Specifies whether the trail is an organization trail.

", "DescribeTrailsRequest$includeShadowTrails": "

Specifies whether to include shadow trails in the response. A shadow trail is the replication in a region of a trail that was created in a different region, or in the case of an organization trail, the replication of an organization trail in member accounts. If you do not include shadow trails, organization trails in a member account and region replication trails will not be returned. The default is true.

", - "EventSelector$IncludeManagementEvents": "

Specify if you want your event selector to include management events for your trail.

For more information, see Management Events in the AWS CloudTrail User Guide.

By default, the value is true.

", + "EventSelector$IncludeManagementEvents": "

Specify if you want your event selector to include management events for your trail.

For more information, see Management Events in the AWS CloudTrail User Guide.

By default, the value is true.

The first copy of management events is free. You are charged for additional copies of management events that you are logging on any subsequent trail in the same region. For more information about CloudTrail pricing, see AWS CloudTrail Pricing.

", "GetTrailStatusResponse$IsLogging": "

Whether the CloudTrail is currently logging AWS API calls.

", "Trail$IncludeGlobalServiceEvents": "

Set to True to include AWS API calls from AWS global services such as IAM. Otherwise, False.

", "Trail$IsMultiRegionTrail": "

Specifies whether the trail exists only in one region or exists in all regions.

", @@ -373,7 +399,7 @@ } }, "KmsKeyNotFoundException": { - "base": "

This exception is thrown when the KMS key does not exist, or when the S3 bucket and the KMS key are not in the same region.

", + "base": "

This exception is thrown when the KMS key does not exist, when the S3 bucket and the KMS key are not in the same region, or when the KMS key associated with the SNS topic either does not exist or is not in the same region.

", "refs": { } }, @@ -463,6 +489,23 @@ "refs": { } }, + "Operator": { + "base": null, + "refs": { + "AdvancedFieldSelector$Equals": null, + "AdvancedFieldSelector$StartsWith": null, + "AdvancedFieldSelector$EndsWith": null, + "AdvancedFieldSelector$NotEquals": null, + "AdvancedFieldSelector$NotStartsWith": null, + "AdvancedFieldSelector$NotEndsWith": null + } + }, + "OperatorValue": { + "base": null, + "refs": { + "Operator$member": null + } + }, "OrganizationNotInAllFeaturesModeException": { "base": "

This exception is thrown when AWS Organizations is not configured to support all features. All features must be enabled in AWS Organization to support creating an organization trail. For more information, see Prepare For Creating a Trail For Your Organization.

", "refs": { @@ -566,6 +609,18 @@ "refs": { } }, + "SelectorField": { + "base": null, + "refs": { + "AdvancedFieldSelector$Field": null + } + }, + "SelectorName": { + "base": null, + "refs": { + "AdvancedEventSelector$Name": null + } + }, "StartLoggingRequest": { "base": "

The request to CloudTrail to start logging AWS API calls for an account.

", "refs": { diff --git a/models/apis/codebuild/2016-10-06/api-2.json b/models/apis/codebuild/2016-10-06/api-2.json index de68230f3e1..1613c89c6a0 100644 --- a/models/apis/codebuild/2016-10-06/api-2.json +++ b/models/apis/codebuild/2016-10-06/api-2.json @@ -239,6 +239,19 @@ {"shape":"ResourceNotFoundException"} ] }, + "GetReportGroupTrend":{ + "name":"GetReportGroupTrend", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetReportGroupTrendInput"}, + "output":{"shape":"GetReportGroupTrendOutput"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ] + }, "GetResourcePolicy":{ "name":"GetResourcePolicy", "http":{ @@ -1280,6 +1293,25 @@ "type":"list", "member":{"shape":"FilterGroup"} }, + "GetReportGroupTrendInput":{ + "type":"structure", + "required":[ + "reportGroupArn", + "trendField" + ], + "members":{ + "reportGroupArn":{"shape":"NonEmptyString"}, + "numOfReports":{"shape":"PageSize"}, + "trendField":{"shape":"ReportGroupTrendFieldType"} + } + }, + "GetReportGroupTrendOutput":{ + "type":"structure", + "members":{ + "stats":{"shape":"ReportGroupTrendStats"}, + "rawData":{"shape":"ReportGroupTrendRawDataList"} + } + }, "GetResourcePolicyInput":{ "type":"structure", "required":["resourceArn"], @@ -1941,6 +1973,32 @@ "DELETING" ] }, + "ReportGroupTrendFieldType":{ + "type":"string", + "enum":[ + "PASS_RATE", + "DURATION", + "TOTAL", + "LINE_COVERAGE", + "LINES_COVERED", + "LINES_MISSED", + "BRANCH_COVERAGE", + "BRANCHES_COVERED", + "BRANCHES_MISSED" + ] + }, + "ReportGroupTrendRawDataList":{ + "type":"list", + "member":{"shape":"ReportWithRawData"} + }, + "ReportGroupTrendStats":{ + "type":"structure", + "members":{ + "average":{"shape":"String"}, + "max":{"shape":"String"}, + "min":{"shape":"String"} + } + }, "ReportGroups":{ "type":"list", "member":{"shape":"ReportGroup"}, @@ -1976,6 +2034,13 @@ "CODE_COVERAGE" ] }, + "ReportWithRawData":{ + "type":"structure", + "members":{ + 
"reportArn":{"shape":"NonEmptyString"}, + "data":{"shape":"String"} + } + }, "Reports":{ "type":"list", "member":{"shape":"Report"}, diff --git a/models/apis/codebuild/2016-10-06/docs-2.json b/models/apis/codebuild/2016-10-06/docs-2.json index 180d1300f43..c84e05e2d8e 100644 --- a/models/apis/codebuild/2016-10-06/docs-2.json +++ b/models/apis/codebuild/2016-10-06/docs-2.json @@ -20,6 +20,7 @@ "DeleteWebhook": "

For an existing AWS CodeBuild build project that has its source code stored in a GitHub or Bitbucket repository, stops AWS CodeBuild from rebuilding the source code every time a code change is pushed to the repository.

", "DescribeCodeCoverages": "

Retrieves one or more code coverage reports.

", "DescribeTestCases": "

Returns a list of details about test cases for a report.

", + "GetReportGroupTrend": null, "GetResourcePolicy": "

Gets a resource policy that is identified by its resource ARN.

", "ImportSourceCredentials": "

Imports the source repository credentials for an AWS CodeBuild project that has its source code stored in a GitHub, GitHub Enterprise, or Bitbucket repository.

", "InvalidateProjectCache": "

Resets the cache for a project.

", @@ -594,6 +595,16 @@ "Webhook$filterGroups": "

An array of arrays of WebhookFilter objects used to determine which webhooks are triggered. At least one WebhookFilter in the array must specify EVENT as its type.

For a build to be triggered, at least one filter group in the filterGroups array must pass. For a filter group to pass, each of its filters must pass.

" } }, + "GetReportGroupTrendInput": { + "base": null, + "refs": { + } + }, + "GetReportGroupTrendOutput": { + "base": null, + "refs": { + } + }, "GetResourcePolicyInput": { "base": null, "refs": { @@ -864,6 +875,7 @@ "DescribeCodeCoveragesInput$reportArn": "

The ARN of the report for which test cases are returned.

", "EnvironmentVariable$name": "

The name or key of the environment variable.

", "ExportedEnvironmentVariable$name": "

The name of this exported environment variable.

", + "GetReportGroupTrendInput$reportGroupArn": null, "GetResourcePolicyInput$resourceArn": "

The ARN of the resource that is associated with the resource policy.

", "GetResourcePolicyOutput$policy": "

The resource policy for the resource identified by the input ARN parameter.

", "Identifiers$member": null, @@ -891,6 +903,7 @@ "ReportArns$member": null, "ReportGroup$arn": "

The ARN of a ReportGroup.

", "ReportGroupArns$member": null, + "ReportWithRawData$reportArn": null, "RetryBuildBatchInput$id": "

Specifies the identifier of the batch build to restart.

", "RetryBuildInput$id": "

Specifies the identifier of the build to restart.

", "S3ReportExportConfig$bucket": "

The name of the S3 bucket where the raw data of a report are exported.

", @@ -942,6 +955,7 @@ "refs": { "DescribeCodeCoveragesInput$maxResults": "

The maximum number of results to return.

", "DescribeTestCasesInput$maxResults": "

The maximum number of paginated test cases returned per response. Use nextToken to iterate pages in the list of returned TestCase objects. The default value is 100.

", + "GetReportGroupTrendInput$numOfReports": null, "ListBuildBatchesForProjectInput$maxResults": "

The maximum number of results to return.

", "ListBuildBatchesInput$maxResults": "

The maximum number of results to return.

", "ListReportGroupsInput$maxResults": "

The maximum number of paginated report groups returned per response. Use nextToken to iterate pages in the list of returned ReportGroup objects. The default value is 100.

", @@ -1252,6 +1266,24 @@ "ReportGroup$status": null } }, + "ReportGroupTrendFieldType": { + "base": null, + "refs": { + "GetReportGroupTrendInput$trendField": null + } + }, + "ReportGroupTrendRawDataList": { + "base": null, + "refs": { + "GetReportGroupTrendOutput$rawData": null + } + }, + "ReportGroupTrendStats": { + "base": null, + "refs": { + "GetReportGroupTrendOutput$stats": null + } + }, "ReportGroups": { "base": null, "refs": { @@ -1285,6 +1317,12 @@ "ReportGroup$type": "

The type of the ReportGroup. The one valid value is TEST.

" } }, + "ReportWithRawData": { + "base": null, + "refs": { + "ReportGroupTrendRawDataList$member": null + } + }, "Reports": { "base": null, "refs": { @@ -1560,7 +1598,11 @@ "ProjectSourceVersion$sourceVersion": "

The source version for the corresponding source identifier. If specified, must be one of:

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.

", "Report$name": "

The name of the report that was run.

", "Report$executionId": "

The ARN of the build run that generated this report.

", + "ReportGroupTrendStats$average": null, + "ReportGroupTrendStats$max": null, + "ReportGroupTrendStats$min": null, "ReportStatusCounts$key": null, + "ReportWithRawData$data": null, "ResolvedArtifact$location": "

The location of the artifact.

", "ResolvedArtifact$identifier": "

The identifier of the artifact.

", "RetryBuildBatchInput$idempotencyToken": "

A unique, case sensitive identifier you provide to ensure the idempotency of the RetryBuildBatch request. The token is included in the RetryBuildBatch request and is valid for five minutes. If you repeat the RetryBuildBatch request with the same token, but change a parameter, AWS CodeBuild returns a parameter mismatch error.

", diff --git a/models/apis/cognito-idp/2016-04-18/api-2.json b/models/apis/cognito-idp/2016-04-18/api-2.json index 8a7c87b0ad3..68f22d09c8e 100644 --- a/models/apis/cognito-idp/2016-04-18/api-2.json +++ b/models/apis/cognito-idp/2016-04-18/api-2.json @@ -3056,6 +3056,36 @@ "CertificateArn":{"shape":"ArnType"} } }, + "CustomEmailLambdaVersionConfigType":{ + "type":"structure", + "required":[ + "LambdaVersion", + "LambdaArn" + ], + "members":{ + "LambdaVersion":{"shape":"CustomEmailSenderLambdaVersionType"}, + "LambdaArn":{"shape":"ArnType"} + } + }, + "CustomEmailSenderLambdaVersionType":{ + "type":"string", + "enum":["V1_0"] + }, + "CustomSMSLambdaVersionConfigType":{ + "type":"structure", + "required":[ + "LambdaVersion", + "LambdaArn" + ], + "members":{ + "LambdaVersion":{"shape":"CustomSMSSenderLambdaVersionType"}, + "LambdaArn":{"shape":"ArnType"} + } + }, + "CustomSMSSenderLambdaVersionType":{ + "type":"string", + "enum":["V1_0"] + }, "DateType":{"type":"timestamp"}, "DefaultEmailOptionType":{ "type":"string", @@ -3902,7 +3932,10 @@ "CreateAuthChallenge":{"shape":"ArnType"}, "VerifyAuthChallengeResponse":{"shape":"ArnType"}, "PreTokenGeneration":{"shape":"ArnType"}, - "UserMigration":{"shape":"ArnType"} + "UserMigration":{"shape":"ArnType"}, + "CustomSMSSender":{"shape":"CustomSMSLambdaVersionConfigType"}, + "CustomEmailSender":{"shape":"CustomEmailLambdaVersionConfigType"}, + "KMSKeyID":{"shape":"ArnType"} } }, "LimitExceededException":{ diff --git a/models/apis/cognito-idp/2016-04-18/docs-2.json b/models/apis/cognito-idp/2016-04-18/docs-2.json index bc60438e7fb..90774c8fa68 100644 --- a/models/apis/cognito-idp/2016-04-18/docs-2.json +++ b/models/apis/cognito-idp/2016-04-18/docs-2.json @@ -83,7 +83,7 @@ "RespondToAuthChallenge": "

Responds to the authentication challenge.

", "SetRiskConfiguration": "

Configures actions on detected risks. To delete the risk configuration for UserPoolId or ClientId, pass null values for all four configuration types.

To enable Amazon Cognito advanced security features, update the user pool to include the UserPoolAddOns key AdvancedSecurityMode.

", "SetUICustomization": "

Sets the UI customization information for a user pool's built-in app UI.

You can specify app UI customization settings for a single client (with a specific clientId) or for all clients (by setting the clientId to ALL). If you specify ALL, the default configuration will be used for every client that has no UI customization set previously. If you specify UI customization settings for a particular client, it will no longer fall back to the ALL configuration.

To use this API, your user pool must have a domain associated with it. Otherwise, there is no place to host the app's pages, and the service will throw an error.

", - "SetUserMFAPreference": "

Set the user's multi-factor authentication (MFA) method preference, including which MFA factors are enabled and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are enabled. If multiple options are enabled and no preference is set, a challenge to choose an MFA option will be returned during sign in.

", + "SetUserMFAPreference": "

Set the user's multi-factor authentication (MFA) method preference, including which MFA factors are enabled and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are enabled. If multiple options are enabled and no preference is set, a challenge to choose an MFA option will be returned during sign in. If an MFA type is enabled for a user, the user will be prompted for MFA during all sign in attempts, unless device tracking is turned on and the device has been trusted. If you would like MFA to be applied selectively based on the assessed risk level of sign in attempts, disable MFA for users and turn on Adaptive Authentication for the user pool.

", "SetUserPoolMfaConfig": "

Set the user pool multi-factor authentication (MFA) configuration.

", "SetUserSettings": "

This action is no longer supported. You can use it to configure only SMS MFA. You can't use it to configure TOTP software token MFA. To configure either type of MFA, use SetUserMFAPreference instead.

", "SignUp": "

Registers the user in the specified user pool and creates a user name, password, and user attributes.

", @@ -477,6 +477,8 @@ "CreateGroupRequest$RoleArn": "

The role ARN for the group.

", "CreateUserImportJobRequest$CloudWatchLogsRoleArn": "

The role ARN for the Amazon CloudWatch Logging role for the user import job.

", "CustomDomainConfigType$CertificateArn": "

The Amazon Resource Name (ARN) of an AWS Certificate Manager SSL certificate. You use this certificate for the subdomain of your custom domain.

", + "CustomEmailLambdaVersionConfigType$LambdaArn": "

The Lambda Amazon Resource Name of the Lambda function that Amazon Cognito triggers to send email notifications to users.

", + "CustomSMSLambdaVersionConfigType$LambdaArn": "

The Lambda Amazon Resource Name of the Lambda function that Amazon Cognito triggers to send SMS notifications to users.

", "EmailConfigurationType$SourceArn": "

The Amazon Resource Name (ARN) of a verified email address in Amazon SES. This email address is used in one of the following ways, depending on the value that you specify for the EmailSendingAccount parameter:

", "GroupType$RoleArn": "

The role ARN for the group.

", "LambdaConfigType$PreSignUp": "

A pre-registration AWS Lambda trigger.

", @@ -489,9 +491,10 @@ "LambdaConfigType$VerifyAuthChallengeResponse": "

Verifies the authentication challenge response.

", "LambdaConfigType$PreTokenGeneration": "

A Lambda trigger that is invoked before token generation.

", "LambdaConfigType$UserMigration": "

The user migration Lambda config type.

", + "LambdaConfigType$KMSKeyID": "

The Amazon Resource Name of a Key Management Service customer master key. Amazon Cognito uses the key to encrypt codes and temporary passwords sent to CustomEmailSender and CustomSMSSender.

", "ListTagsForResourceRequest$ResourceArn": "

The Amazon Resource Name (ARN) of the user pool that the tags are assigned to.

", "NotifyConfigurationType$SourceArn": "

The Amazon Resource Name (ARN) of the identity that is associated with the sending authorization policy. It permits Amazon Cognito to send for the email address specified in the From parameter.

", - "SmsConfigurationType$SnsCallerArn": "

The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) caller. This is the ARN of the IAM role in your AWS account which Cognito will use to send SMS messages.

", + "SmsConfigurationType$SnsCallerArn": "

The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) caller. This is the ARN of the IAM role in your AWS account which Cognito will use to send SMS messages. SMS messages are subject to a spending limit.

", "TagResourceRequest$ResourceArn": "

The Amazon Resource Name (ARN) of the user pool to assign the tags to.

", "UntagResourceRequest$ResourceArn": "

The Amazon Resource Name (ARN) of the user pool that the tags are assigned to.

", "UpdateGroupRequest$RoleArn": "

The new role ARN for the group. This is used for setting the cognito:roles and cognito:preferred_role claims in the token.

", @@ -631,14 +634,14 @@ "PasswordPolicyType$RequireLowercase": "

In the password policy that you have set, refers to whether you have required users to use at least one lowercase letter in their password.

", "PasswordPolicyType$RequireNumbers": "

In the password policy that you have set, refers to whether you have required users to use at least one number in their password.

", "PasswordPolicyType$RequireSymbols": "

In the password policy that you have set, refers to whether you have required users to use at least one symbol in their password.

", - "SMSMfaSettingsType$Enabled": "

Specifies whether SMS text message MFA is enabled.

", + "SMSMfaSettingsType$Enabled": "

Specifies whether SMS text message MFA is enabled. If an MFA type is enabled for a user, the user will be prompted for MFA during all sign in attempts, unless device tracking is turned on and the device has been trusted.

", "SMSMfaSettingsType$PreferredMfa": "

Specifies whether SMS is the preferred MFA method.

", "SchemaAttributeType$DeveloperOnlyAttribute": "

We recommend that you use WriteAttributes in the user pool client to control how attributes can be mutated for new use cases instead of using DeveloperOnlyAttribute.

Specifies whether the attribute type is developer only. This attribute can only be modified by an administrator. Users will not be able to modify this attribute using their access token. For example, DeveloperOnlyAttribute can be modified using AdminUpdateUserAttributes but cannot be updated using UpdateUserAttributes.

", "SchemaAttributeType$Mutable": "

Specifies whether the value of the attribute can be changed.

For any user pool attribute that's mapped to an identity provider attribute, you must set this parameter to true. Amazon Cognito updates mapped attributes when users sign in to your application through an identity provider. If an attribute is immutable, Amazon Cognito throws an error when it attempts to update the attribute. For more information, see Specifying Identity Provider Attribute Mappings for Your User Pool.

", "SchemaAttributeType$Required": "

Specifies whether a user pool attribute is required. If the attribute is required and the user does not provide a value, registration or sign-in will fail.

", "SignUpResponse$UserConfirmed": "

A response from the server indicating that a user registration has been confirmed.

", "SoftwareTokenMfaConfigType$Enabled": "

Specifies whether software token MFA is enabled.

", - "SoftwareTokenMfaSettingsType$Enabled": "

Specifies whether software token MFA is enabled.

", + "SoftwareTokenMfaSettingsType$Enabled": "

Specifies whether software token MFA is enabled. If an MFA type is enabled for a user, the user will be prompted for MFA during all sign in attempts, unless device tracking is turned on and the device has been trusted.

", "SoftwareTokenMfaSettingsType$PreferredMfa": "

Specifies whether software token MFA is the preferred MFA method.

", "UpdateUserPoolClientRequest$AllowedOAuthFlowsUserPoolClient": "

Set to true if the client is allowed to follow the OAuth protocol when interacting with Cognito user pools.

", "UserPoolClientType$AllowedOAuthFlowsUserPoolClient": "

Set to true if the client is allowed to follow the OAuth protocol when interacting with Cognito user pools.

", @@ -995,6 +998,30 @@ "UpdateUserPoolDomainRequest$CustomDomainConfig": "

The configuration for a custom domain that hosts the sign-up and sign-in pages for your application. Use this object to specify an SSL certificate that is managed by ACM.

" } }, + "CustomEmailLambdaVersionConfigType": { + "base": "

A custom email sender Lambda configuration type.

", + "refs": { + "LambdaConfigType$CustomEmailSender": "

A custom email sender AWS Lambda trigger.

" + } + }, + "CustomEmailSenderLambdaVersionType": { + "base": null, + "refs": { + "CustomEmailLambdaVersionConfigType$LambdaVersion": "

The Lambda version represents the signature of the \"request\" attribute in the \"event\" information Amazon Cognito passes to your custom email Lambda function. The only supported value is V1_0.

" + } + }, + "CustomSMSLambdaVersionConfigType": { + "base": "

A custom SMS sender Lambda configuration type.

", + "refs": { + "LambdaConfigType$CustomSMSSender": "

A custom SMS sender AWS Lambda trigger.

" + } + }, + "CustomSMSSenderLambdaVersionType": { + "base": null, + "refs": { + "CustomSMSLambdaVersionConfigType$LambdaVersion": "

The Lambda version represents the signature of the \"request\" attribute in the \"event\" information Amazon Cognito passes to your custom SMS Lambda function. The only supported value is V1_0.

" + } + }, "DateType": { "base": null, "refs": { @@ -1275,7 +1302,7 @@ } }, "EmailConfigurationType": { - "base": "

The email configuration type.

", + "base": "

The email configuration type.

Amazon Cognito has specific regions for use with Amazon SES. For more information on the supported regions, see Email Settings for Amazon Cognito User Pools.

", "refs": { "CreateUserPoolRequest$EmailConfiguration": "

The email configuration.

", "UpdateUserPoolRequest$EmailConfiguration": "

Email configuration.

", @@ -1298,39 +1325,39 @@ "EmailSendingAccountType": { "base": null, "refs": { - "EmailConfigurationType$EmailSendingAccount": "

Specifies whether Amazon Cognito emails your users by using its built-in email functionality or your Amazon SES email configuration. Specify one of the following values:

COGNITO_DEFAULT

When Amazon Cognito emails your users, it uses its built-in email functionality. When you use the default option, Amazon Cognito allows only a limited number of emails each day for your user pool. For typical production environments, the default email limit is below the required delivery volume. To achieve a higher delivery volume, specify DEVELOPER to use your Amazon SES email configuration.

To look up the email delivery limit for the default option, see Limits in Amazon Cognito in the Amazon Cognito Developer Guide.

The default FROM address is no-reply@verificationemail.com. To customize the FROM address, provide the ARN of an Amazon SES verified email address for the SourceArn parameter.

DEVELOPER

When Amazon Cognito emails your users, it uses your Amazon SES configuration. Amazon Cognito calls Amazon SES on your behalf to send email from your verified email address. When you use this option, the email delivery limits are the same limits that apply to your Amazon SES verified email address in your AWS account.

If you use this option, you must provide the ARN of an Amazon SES verified email address for the SourceArn parameter.

Before Amazon Cognito can email your users, it requires additional permissions to call Amazon SES on your behalf. When you update your user pool with this option, Amazon Cognito creates a service-linked role, which is a type of IAM role, in your AWS account. This role contains the permissions that allow Amazon Cognito to access Amazon SES and send email messages with your address. For more information about the service-linked role that Amazon Cognito creates, see Using Service-Linked Roles for Amazon Cognito in the Amazon Cognito Developer Guide.

" + "EmailConfigurationType$EmailSendingAccount": "

Specifies whether Amazon Cognito emails your users by using its built-in email functionality or your Amazon SES email configuration. Specify one of the following values:

COGNITO_DEFAULT

When Amazon Cognito emails your users, it uses its built-in email functionality. When you use the default option, Amazon Cognito allows only a limited number of emails each day for your user pool. For typical production environments, the default email limit is below the required delivery volume. To achieve a higher delivery volume, specify DEVELOPER to use your Amazon SES email configuration.

To look up the email delivery limit for the default option, see Limits in Amazon Cognito in the Amazon Cognito Developer Guide.

The default FROM address is no-reply@verificationemail.com. To customize the FROM address, provide the ARN of an Amazon SES verified email address for the SourceArn parameter.

If EmailSendingAccount is COGNITO_DEFAULT, the following parameters aren't allowed:

DEVELOPER EmailSendingAccount is required.

DEVELOPER

When Amazon Cognito emails your users, it uses your Amazon SES configuration. Amazon Cognito calls Amazon SES on your behalf to send email from your verified email address. When you use this option, the email delivery limits are the same limits that apply to your Amazon SES verified email address in your AWS account.

If you use this option, you must provide the ARN of an Amazon SES verified email address for the SourceArn parameter.

Before Amazon Cognito can email your users, it requires additional permissions to call Amazon SES on your behalf. When you update your user pool with this option, Amazon Cognito creates a service-linked role, which is a type of IAM role, in your AWS account. This role contains the permissions that allow Amazon Cognito to access Amazon SES and send email messages with your address. For more information about the service-linked role that Amazon Cognito creates, see Using Service-Linked Roles for Amazon Cognito in the Amazon Cognito Developer Guide.

" } }, "EmailVerificationMessageByLinkType": { "base": null, "refs": { - "VerificationMessageTemplateType$EmailMessageByLink": "

The email message template for sending a confirmation link to the user.

" + "VerificationMessageTemplateType$EmailMessageByLink": "

The email message template for sending a confirmation link to the user. EmailMessageByLink is allowed only if EmailSendingAccount is DEVELOPER.

" } }, "EmailVerificationMessageType": { "base": null, "refs": { - "CreateUserPoolRequest$EmailVerificationMessage": "

A string representing the email verification message.

", - "MessageTemplateType$EmailMessage": "

The message template for email messages.

", + "CreateUserPoolRequest$EmailVerificationMessage": "

A string representing the email verification message. EmailVerificationMessage is allowed only if EmailSendingAccount is DEVELOPER.

", + "MessageTemplateType$EmailMessage": "

The message template for email messages. EmailMessage is allowed only if EmailSendingAccount is DEVELOPER.

", "UpdateUserPoolRequest$EmailVerificationMessage": "

The contents of the email verification message.

", "UserPoolType$EmailVerificationMessage": "

The contents of the email verification message.

", - "VerificationMessageTemplateType$EmailMessage": "

The email message template.

" + "VerificationMessageTemplateType$EmailMessage": "

The email message template. EmailMessage is allowed only if EmailSendingAccount is DEVELOPER.

" } }, "EmailVerificationSubjectByLinkType": { "base": null, "refs": { - "VerificationMessageTemplateType$EmailSubjectByLink": "

The subject line for the email message template for sending a confirmation link to the user.

" + "VerificationMessageTemplateType$EmailSubjectByLink": "

The subject line for the email message template for sending a confirmation link to the user. EmailSubjectByLink is allowed only if EmailSendingAccount is DEVELOPER.

" } }, "EmailVerificationSubjectType": { "base": null, "refs": { - "CreateUserPoolRequest$EmailVerificationSubject": "

A string representing the email verification subject.

", - "MessageTemplateType$EmailSubject": "

The subject line for email messages.

", + "CreateUserPoolRequest$EmailVerificationSubject": "

A string representing the email verification subject. EmailVerificationSubject is allowed only if EmailSendingAccount is DEVELOPER.

", + "MessageTemplateType$EmailSubject": "

The subject line for email messages. EmailSubject is allowed only if EmailSendingAccount is DEVELOPER.

", "UpdateUserPoolRequest$EmailVerificationSubject": "

The subject of the email verification message.

", "UserPoolType$EmailVerificationSubject": "

The subject of the email verification message.

", - "VerificationMessageTemplateType$EmailSubject": "

The subject line for the email message template.

" + "VerificationMessageTemplateType$EmailSubject": "

The subject line for the email message template. EmailSubject is allowed only if EmailSendingAccount is DEVELOPER.

" } }, "EnableSoftwareTokenMFAException": { @@ -2295,7 +2322,7 @@ } }, "SMSMfaSettingsType": { - "base": "

The type used for enabling SMS MFA at the user level.

", + "base": "

The type used for enabling SMS MFA at the user level. Phone numbers don't need to be verified to be used for SMS MFA. If an MFA type is enabled for a user, the user will be prompted for MFA during all sign in attempts, unless device tracking is turned on and the device has been trusted. If you would like MFA to be applied selectively based on the assessed risk level of sign in attempts, disable MFA for users and turn on Adaptive Authentication for the user pool.

", "refs": { "AdminSetUserMFAPreferenceRequest$SMSMfaSettings": "

The SMS text message MFA settings.

", "SetUserMFAPreferenceRequest$SMSMfaSettings": "

The SMS text message multi-factor authentication (MFA) settings.

" @@ -2499,7 +2526,7 @@ } }, "SoftwareTokenMfaSettingsType": { - "base": "

The type used for enabling software token MFA at the user level.

", + "base": "

The type used for enabling software token MFA at the user level. If an MFA type is enabled for a user, the user will be prompted for MFA during all sign in attempts, unless device tracking is turned on and the device has been trusted. If you would like MFA to be applied selectively based on the assessed risk level of sign in attempts, disable MFA for users and turn on Adaptive Authentication for the user pool.

", "refs": { "AdminSetUserMFAPreferenceRequest$SoftwareTokenMfaSettings": "

The time-based one-time password software token MFA settings.

", "SetUserMFAPreferenceRequest$SoftwareTokenMfaSettings": "

The time-based one-time password software token MFA settings.

" diff --git a/models/apis/comprehend/2017-11-27/api-2.json b/models/apis/comprehend/2017-11-27/api-2.json index 9da354d64c4..78cc2066d73 100644 --- a/models/apis/comprehend/2017-11-27/api-2.json +++ b/models/apis/comprehend/2017-11-27/api-2.json @@ -304,6 +304,21 @@ {"shape":"InternalServerException"} ] }, + "DescribeEventsDetectionJob":{ + "name":"DescribeEventsDetectionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventsDetectionJobRequest"}, + "output":{"shape":"DescribeEventsDetectionJobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"JobNotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServerException"} + ] + }, "DescribeKeyPhrasesDetectionJob":{ "name":"DescribeKeyPhrasesDetectionJob", "http":{ @@ -543,6 +558,21 @@ {"shape":"InternalServerException"} ] }, + "ListEventsDetectionJobs":{ + "name":"ListEventsDetectionJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListEventsDetectionJobsRequest"}, + "output":{"shape":"ListEventsDetectionJobsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InvalidFilterException"}, + {"shape":"InternalServerException"} + ] + }, "ListKeyPhrasesDetectionJobs":{ "name":"ListKeyPhrasesDetectionJobs", "http":{ @@ -666,6 +696,21 @@ {"shape":"InternalServerException"} ] }, + "StartEventsDetectionJob":{ + "name":"StartEventsDetectionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartEventsDetectionJobRequest"}, + "output":{"shape":"StartEventsDetectionJobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"KmsKeyValidationException"}, + {"shape":"InternalServerException"} + ] + }, "StartKeyPhrasesDetectionJob":{ "name":"StartKeyPhrasesDetectionJob", "http":{ @@ -754,6 +799,20 @@ {"shape":"InternalServerException"} ] }, + 
"StopEventsDetectionJob":{ + "name":"StopEventsDetectionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopEventsDetectionJobRequest"}, + "output":{"shape":"StopEventsDetectionJobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"JobNotFoundException"}, + {"shape":"InternalServerException"} + ] + }, "StopKeyPhrasesDetectionJob":{ "name":"StopKeyPhrasesDetectionJob", "http":{ @@ -1352,6 +1411,19 @@ "EntityRecognizerProperties":{"shape":"EntityRecognizerProperties"} } }, + "DescribeEventsDetectionJobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{"shape":"JobId"} + } + }, + "DescribeEventsDetectionJobResponse":{ + "type":"structure", + "members":{ + "EventsDetectionJobProperties":{"shape":"EventsDetectionJobProperties"} + } + }, "DescribeKeyPhrasesDetectionJobRequest":{ "type":"structure", "required":["JobId"], @@ -1892,6 +1964,41 @@ "Type":{"shape":"EntityTypeName"} } }, + "EventTypeString":{ + "type":"string", + "max":40, + "min":1, + "pattern":"[A-Z_]*" + }, + "EventsDetectionJobFilter":{ + "type":"structure", + "members":{ + "JobName":{"shape":"JobName"}, + "JobStatus":{"shape":"JobStatus"}, + "SubmitTimeBefore":{"shape":"Timestamp"}, + "SubmitTimeAfter":{"shape":"Timestamp"} + } + }, + "EventsDetectionJobProperties":{ + "type":"structure", + "members":{ + "JobId":{"shape":"JobId"}, + "JobName":{"shape":"JobName"}, + "JobStatus":{"shape":"JobStatus"}, + "Message":{"shape":"AnyLengthString"}, + "SubmitTime":{"shape":"Timestamp"}, + "EndTime":{"shape":"Timestamp"}, + "InputDataConfig":{"shape":"InputDataConfig"}, + "OutputDataConfig":{"shape":"OutputDataConfig"}, + "LanguageCode":{"shape":"LanguageCode"}, + "DataAccessRoleArn":{"shape":"IamRoleArn"}, + "TargetEventTypes":{"shape":"TargetEventTypes"} + } + }, + "EventsDetectionJobPropertiesList":{ + "type":"list", + "member":{"shape":"EventsDetectionJobProperties"} + }, "Float":{"type":"float"}, "IamRoleArn":{ 
"type":"string", @@ -2134,6 +2241,21 @@ "NextToken":{"shape":"String"} } }, + "ListEventsDetectionJobsRequest":{ + "type":"structure", + "members":{ + "Filter":{"shape":"EventsDetectionJobFilter"}, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"MaxResultsInteger"} + } + }, + "ListEventsDetectionJobsResponse":{ + "type":"structure", + "members":{ + "EventsDetectionJobPropertiesList":{"shape":"EventsDetectionJobPropertiesList"}, + "NextToken":{"shape":"String"} + } + }, "ListKeyPhrasesDetectionJobsRequest":{ "type":"structure", "members":{ @@ -2602,6 +2724,35 @@ "JobStatus":{"shape":"JobStatus"} } }, + "StartEventsDetectionJobRequest":{ + "type":"structure", + "required":[ + "InputDataConfig", + "OutputDataConfig", + "DataAccessRoleArn", + "LanguageCode", + "TargetEventTypes" + ], + "members":{ + "InputDataConfig":{"shape":"InputDataConfig"}, + "OutputDataConfig":{"shape":"OutputDataConfig"}, + "DataAccessRoleArn":{"shape":"IamRoleArn"}, + "JobName":{"shape":"JobName"}, + "LanguageCode":{"shape":"LanguageCode"}, + "ClientRequestToken":{ + "shape":"ClientRequestTokenString", + "idempotencyToken":true + }, + "TargetEventTypes":{"shape":"TargetEventTypes"} + } + }, + "StartEventsDetectionJobResponse":{ + "type":"structure", + "members":{ + "JobId":{"shape":"JobId"}, + "JobStatus":{"shape":"JobStatus"} + } + }, "StartKeyPhrasesDetectionJobRequest":{ "type":"structure", "required":[ @@ -2746,6 +2897,20 @@ "JobStatus":{"shape":"JobStatus"} } }, + "StopEventsDetectionJobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{"shape":"JobId"} + } + }, + "StopEventsDetectionJobResponse":{ + "type":"structure", + "members":{ + "JobId":{"shape":"JobId"}, + "JobStatus":{"shape":"JobStatus"} + } + }, "StopKeyPhrasesDetectionJobRequest":{ "type":"structure", "required":["JobId"], @@ -2891,6 +3056,11 @@ "max":256, "min":0 }, + "TargetEventTypes":{ + "type":"list", + "member":{"shape":"EventTypeString"}, + "min":1 + }, 
"TextSizeLimitExceededException":{ "type":"structure", "members":{ diff --git a/models/apis/comprehend/2017-11-27/docs-2.json b/models/apis/comprehend/2017-11-27/docs-2.json index 326ae27dfdd..64bda877e7e 100644 --- a/models/apis/comprehend/2017-11-27/docs-2.json +++ b/models/apis/comprehend/2017-11-27/docs-2.json @@ -20,6 +20,7 @@ "DescribeEndpoint": "

Gets the properties associated with a specific endpoint. Use this operation to get the status of an endpoint.

", "DescribeEntitiesDetectionJob": "

Gets the properties associated with an entities detection job. Use this operation to get the status of a detection job.

", "DescribeEntityRecognizer": "

Provides details about an entity recognizer including status, S3 buckets containing training data, recognizer metadata, metrics, and so on.

", + "DescribeEventsDetectionJob": "

Gets the status and details of an events detection job.

", "DescribeKeyPhrasesDetectionJob": "

Gets the properties associated with a key phrases detection job. Use this operation to get the status of a detection job.

", "DescribePiiEntitiesDetectionJob": "

Gets the properties associated with a PII entities detection job. For example, you can use this operation to get the job status.

", "DescribeSentimentDetectionJob": "

Gets the properties associated with a sentiment detection job. Use this operation to get the status of a detection job.

", @@ -36,6 +37,7 @@ "ListEndpoints": "

Gets a list of all existing endpoints that you've created.

", "ListEntitiesDetectionJobs": "

Gets a list of the entity detection jobs that you have submitted.

", "ListEntityRecognizers": "

Gets a list of the properties of all entity recognizers that you created, including recognizers currently in training. Allows you to filter the list of recognizers based on criteria such as status and submission time. This call returns up to 500 entity recognizers in the list, with a default number of 100 recognizers in the list.

The results of this list are not in any particular order. Please get the list and sort locally if needed.

", + "ListEventsDetectionJobs": "

Gets a list of the events detection jobs that you have submitted.

", "ListKeyPhrasesDetectionJobs": "

Get a list of key phrase detection jobs that you have submitted.

", "ListPiiEntitiesDetectionJobs": "

Gets a list of the PII entity detection jobs that you have submitted.

", "ListSentimentDetectionJobs": "

Gets a list of sentiment detection jobs that you have submitted.

", @@ -44,12 +46,14 @@ "StartDocumentClassificationJob": "

Starts an asynchronous document classification job. Use the operation to track the progress of the job.

", "StartDominantLanguageDetectionJob": "

Starts an asynchronous dominant language detection job for a collection of documents. Use the operation to track the status of a job.

", "StartEntitiesDetectionJob": "

Starts an asynchronous entity detection job for a collection of documents. Use the operation to track the status of a job.

This API can be used for either standard entity detection or custom entity recognition. In order to be used for custom entity recognition, the optional EntityRecognizerArn must be used in order to provide access to the recognizer being used to detect the custom entity.

", + "StartEventsDetectionJob": "

Starts an asynchronous event detection job for a collection of documents.

", "StartKeyPhrasesDetectionJob": "

Starts an asynchronous key phrase detection job for a collection of documents. Use the operation to track the status of a job.

", "StartPiiEntitiesDetectionJob": "

Starts an asynchronous PII entity detection job for a collection of documents.

", "StartSentimentDetectionJob": "

Starts an asynchronous sentiment detection job for a collection of documents. use the operation to track the status of a job.

", "StartTopicsDetectionJob": "

Starts an asynchronous topic detection job. Use the DescribeTopicDetectionJob operation to track the status of a job.

", "StopDominantLanguageDetectionJob": "

Stops a dominant language detection job in progress.

If the job state is IN_PROGRESS the job is marked for termination and put into the STOP_REQUESTED state. If the job completes before it can be stopped, it is put into the COMPLETED state; otherwise the job is stopped and put into the STOPPED state.

If the job is in the COMPLETED or FAILED state when you call the StopDominantLanguageDetectionJob operation, the operation returns a 400 Internal Request Exception.

When a job is stopped, any documents already processed are written to the output location.

", "StopEntitiesDetectionJob": "

Stops an entities detection job in progress.

If the job state is IN_PROGRESS the job is marked for termination and put into the STOP_REQUESTED state. If the job completes before it can be stopped, it is put into the COMPLETED state; otherwise the job is stopped and put into the STOPPED state.

If the job is in the COMPLETED or FAILED state when you call the StopDominantLanguageDetectionJob operation, the operation returns a 400 Internal Request Exception.

When a job is stopped, any documents already processed are written to the output location.

", + "StopEventsDetectionJob": "

Stops an events detection job in progress.

", "StopKeyPhrasesDetectionJob": "

Stops a key phrases detection job in progress.

If the job state is IN_PROGRESS the job is marked for termination and put into the STOP_REQUESTED state. If the job completes before it can be stopped, it is put into the COMPLETED state; otherwise the job is stopped and put into the STOPPED state.

If the job is in the COMPLETED or FAILED state when you call the StopDominantLanguageDetectionJob operation, the operation returns a 400 Internal Request Exception.

When a job is stopped, any documents already processed are written to the output location.

", "StopPiiEntitiesDetectionJob": "

Stops a PII entities detection job in progress.

", "StopSentimentDetectionJob": "

Stops a sentiment detection job in progress.

If the job state is IN_PROGRESS the job is marked for termination and put into the STOP_REQUESTED state. If the job completes before it can be stopped, it is put into the COMPLETED state; otherwise the job is be stopped and put into the STOPPED state.

If the job is in the COMPLETED or FAILED state when you call the StopDominantLanguageDetectionJob operation, the operation returns a 400 Internal Request Exception.

When a job is stopped, any documents already processed are written to the output location.

", @@ -70,6 +74,7 @@ "EntitiesDetectionJobProperties$Message": "

A description of the status of a job.

", "EntityRecognizerMetadataEntityTypesListItem$Type": "

Type of entity from the list of entity types in the metadata of an entity recognizer.

", "EntityRecognizerProperties$Message": "

A description of the status of the recognizer.

", + "EventsDetectionJobProperties$Message": "

A description of the status of the events detection job.

", "KeyPhrasesDetectionJobProperties$Message": "

A description of the status of a job.

", "PiiEntitiesDetectionJobProperties$Message": "

A description of the status of a job.

", "SentimentDetectionJobProperties$Message": "

A description of the status of a job.

", @@ -227,6 +232,7 @@ "StartDocumentClassificationJobRequest$ClientRequestToken": "

A unique identifier for the request. If you do not set the client request token, Amazon Comprehend generates one.

", "StartDominantLanguageDetectionJobRequest$ClientRequestToken": "

A unique identifier for the request. If you do not set the client request token, Amazon Comprehend generates one.

", "StartEntitiesDetectionJobRequest$ClientRequestToken": "

A unique identifier for the request. If you don't set the client request token, Amazon Comprehend generates one.

", + "StartEventsDetectionJobRequest$ClientRequestToken": "

A unique identifier for the request. If you don't set the client request token, Amazon Comprehend generates one.

", "StartKeyPhrasesDetectionJobRequest$ClientRequestToken": "

A unique identifier for the request. If you don't set the client request token, Amazon Comprehend generates one.

", "StartPiiEntitiesDetectionJobRequest$ClientRequestToken": "

A unique identifier for the request. If you don't set the client request token, Amazon Comprehend generates one.

", "StartSentimentDetectionJobRequest$ClientRequestToken": "

A unique identifier for the request. If you don't set the client request token, Amazon Comprehend generates one.

", @@ -420,6 +426,16 @@ "refs": { } }, + "DescribeEventsDetectionJobRequest": { + "base": null, + "refs": { + } + }, + "DescribeEventsDetectionJobResponse": { + "base": null, + "refs": { + } + }, "DescribeKeyPhrasesDetectionJobRequest": { "base": null, "refs": { @@ -662,11 +678,11 @@ "EntityRecognizerEvaluationMetrics$F1Score": "

A measure of how accurate the recognizer results are for the test data. It is derived from the Precision and Recall values. The F1Score is the harmonic average of the two scores. The highest score is 1, and the worst score is 0.

", "EntityTypesEvaluationMetrics$Precision": "

A measure of the usefulness of the recognizer results for a specific entity type in the test data. High precision means that the recognizer returned substantially more relevant results than irrelevant ones.

", "EntityTypesEvaluationMetrics$Recall": "

A measure of how complete the recognizer results are for a specific entity type in the test data. High recall means that the recognizer returned most of the relevant results.

", - "EntityTypesEvaluationMetrics$F1Score": "

A measure of how accurate the recognizer results are for for a specific entity type in the test data. It is derived from the Precision and Recall values. The F1Score is the harmonic average of the two scores. The highest score is 1, and the worst score is 0.

" + "EntityTypesEvaluationMetrics$F1Score": "

A measure of how accurate the recognizer results are for a specific entity type in the test data. It is derived from the Precision and Recall values. The F1Score is the harmonic average of the two scores. The highest score is 1, and the worst score is 0.

" } }, "EndpointFilter": { - "base": "

The filter used to determine which endpoints are are returned. You can filter jobs on their name, model, status, or the date and time that they were created. You can only set one filter at a time.

", + "base": "

The filter used to determine which endpoints are returned. You can filter jobs on their name, model, status, or the date and time that they were created. You can only set one filter at a time.

", "refs": { "ListEndpointsRequest$Filter": "

Filters the endpoints that are returned. You can filter endpoints on their name, model, status, or the date and time that they were created. You can only set one filter at a time.

" } @@ -844,6 +860,31 @@ "EntityTypesList$member": null } }, + "EventTypeString": { + "base": null, + "refs": { + "TargetEventTypes$member": null + } + }, + "EventsDetectionJobFilter": { + "base": "

Provides information for filtering a list of event detection jobs.

", + "refs": { + "ListEventsDetectionJobsRequest$Filter": "

Filters the jobs that are returned. You can filter jobs on their name, status, or the date and time that they were submitted. You can only set one filter at a time.

" + } + }, + "EventsDetectionJobProperties": { + "base": "

Provides information about an events detection job.

", + "refs": { + "DescribeEventsDetectionJobResponse$EventsDetectionJobProperties": "

An object that contains the properties associated with an event detection job.

", + "EventsDetectionJobPropertiesList$member": null + } + }, + "EventsDetectionJobPropertiesList": { + "base": null, + "refs": { + "ListEventsDetectionJobsResponse$EventsDetectionJobPropertiesList": "

A list containing the properties of each job that is returned.

" + } + }, "Float": { "base": null, "refs": { @@ -870,12 +911,14 @@ "DominantLanguageDetectionJobProperties$DataAccessRoleArn": "

The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input data.

", "EntitiesDetectionJobProperties$DataAccessRoleArn": "

The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input data.

", "EntityRecognizerProperties$DataAccessRoleArn": "

The Amazon Resource Name (ARN) of the AWS Identity and Management (IAM) role that grants Amazon Comprehend read access to your input data.

", + "EventsDetectionJobProperties$DataAccessRoleArn": "

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data.

", "KeyPhrasesDetectionJobProperties$DataAccessRoleArn": "

The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input data.

", "PiiEntitiesDetectionJobProperties$DataAccessRoleArn": "

The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input data.

", "SentimentDetectionJobProperties$DataAccessRoleArn": "

The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input data.

", "StartDocumentClassificationJobRequest$DataAccessRoleArn": "

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data.

", "StartDominantLanguageDetectionJobRequest$DataAccessRoleArn": "

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data. For more information, see https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions.

", "StartEntitiesDetectionJobRequest$DataAccessRoleArn": "

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data. For more information, see https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions.

", + "StartEventsDetectionJobRequest$DataAccessRoleArn": "

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data.

", "StartKeyPhrasesDetectionJobRequest$DataAccessRoleArn": "

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data. For more information, see https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions.

", "StartPiiEntitiesDetectionJobRequest$DataAccessRoleArn": "

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data.

", "StartSentimentDetectionJobRequest$DataAccessRoleArn": "

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data. For more information, see https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions.

", @@ -898,12 +941,14 @@ "DocumentClassificationJobProperties$InputDataConfig": "

The input data configuration that you supplied when you created the document classification job.

", "DominantLanguageDetectionJobProperties$InputDataConfig": "

The input data configuration that you supplied when you created the dominant language detection job.

", "EntitiesDetectionJobProperties$InputDataConfig": "

The input data configuration that you supplied when you created the entities detection job.

", + "EventsDetectionJobProperties$InputDataConfig": "

The input data configuration that you supplied when you created the events detection job.

", "KeyPhrasesDetectionJobProperties$InputDataConfig": "

The input data configuration that you supplied when you created the key phrases detection job.

", "PiiEntitiesDetectionJobProperties$InputDataConfig": "

The input properties for a PII entities detection job.

", "SentimentDetectionJobProperties$InputDataConfig": "

The input data configuration that you supplied when you created the sentiment detection job.

", "StartDocumentClassificationJobRequest$InputDataConfig": "

Specifies the format and location of the input data for the job.

", "StartDominantLanguageDetectionJobRequest$InputDataConfig": "

Specifies the format and location of the input data for the job.

", "StartEntitiesDetectionJobRequest$InputDataConfig": "

Specifies the format and location of the input data for the job.

", + "StartEventsDetectionJobRequest$InputDataConfig": "

Specifies the format and location of the input data for the job.

", "StartKeyPhrasesDetectionJobRequest$InputDataConfig": "

Specifies the format and location of the input data for the job.

", "StartPiiEntitiesDetectionJobRequest$InputDataConfig": "

The input properties for a PII entities detection job.

", "StartSentimentDetectionJobRequest$InputDataConfig": "

Specifies the format and location of the input data for the job.

", @@ -965,6 +1010,7 @@ "DescribeDocumentClassificationJobRequest$JobId": "

The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its response.

", "DescribeDominantLanguageDetectionJobRequest$JobId": "

The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its response.

", "DescribeEntitiesDetectionJobRequest$JobId": "

The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its response.

", + "DescribeEventsDetectionJobRequest$JobId": "

The identifier of the events detection job.

", "DescribeKeyPhrasesDetectionJobRequest$JobId": "

The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its response.

", "DescribePiiEntitiesDetectionJobRequest$JobId": "

The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its response.

", "DescribeSentimentDetectionJobRequest$JobId": "

The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its response.

", @@ -972,12 +1018,14 @@ "DocumentClassificationJobProperties$JobId": "

The identifier assigned to the document classification job.

", "DominantLanguageDetectionJobProperties$JobId": "

The identifier assigned to the dominant language detection job.

", "EntitiesDetectionJobProperties$JobId": "

The identifier assigned to the entities detection job.

", + "EventsDetectionJobProperties$JobId": "

The identifier assigned to the events detection job.

", "KeyPhrasesDetectionJobProperties$JobId": "

The identifier assigned to the key phrases detection job.

", "PiiEntitiesDetectionJobProperties$JobId": "

The identifier assigned to the PII entities detection job.

", "SentimentDetectionJobProperties$JobId": "

The identifier assigned to the sentiment detection job.

", "StartDocumentClassificationJobResponse$JobId": "

The identifier generated for the job. To get the status of the job, use this identifier with the operation.

", "StartDominantLanguageDetectionJobResponse$JobId": "

The identifier generated for the job. To get the status of a job, use this identifier with the operation.

", "StartEntitiesDetectionJobResponse$JobId": "

The identifier generated for the job. To get the status of job, use this identifier with the operation.

", + "StartEventsDetectionJobResponse$JobId": "

The identifier generated for the job. To get the status of the job, use this identifier with the operation.

", "StartKeyPhrasesDetectionJobResponse$JobId": "

The identifier generated for the job. To get the status of a job, use this identifier with the operation.

", "StartPiiEntitiesDetectionJobResponse$JobId": "

The identifier generated for the job.

", "StartSentimentDetectionJobResponse$JobId": "

The identifier generated for the job. To get the status of a job, use this identifier with the operation.

", @@ -986,6 +1034,8 @@ "StopDominantLanguageDetectionJobResponse$JobId": "

The identifier of the dominant language detection job to stop.

", "StopEntitiesDetectionJobRequest$JobId": "

The identifier of the entities detection job to stop.

", "StopEntitiesDetectionJobResponse$JobId": "

The identifier of the entities detection job to stop.

", + "StopEventsDetectionJobRequest$JobId": "

The identifier of the events detection job to stop.

", + "StopEventsDetectionJobResponse$JobId": "

The identifier of the events detection job to stop.

", "StopKeyPhrasesDetectionJobRequest$JobId": "

The identifier of the key phrases detection job to stop.

", "StopKeyPhrasesDetectionJobResponse$JobId": "

The identifier of the key phrases detection job to stop.

", "StopPiiEntitiesDetectionJobRequest$JobId": "

The identifier of the PII entities detection job to stop.

", @@ -1004,6 +1054,8 @@ "DominantLanguageDetectionJobProperties$JobName": "

The name that you assigned to the dominant language detection job.

", "EntitiesDetectionJobFilter$JobName": "

Filters on the name of the job.

", "EntitiesDetectionJobProperties$JobName": "

The name that you assigned the entities detection job.

", + "EventsDetectionJobFilter$JobName": "

Filters on the name of the events detection job.

", + "EventsDetectionJobProperties$JobName": "

The name you assigned the events detection job.

", "KeyPhrasesDetectionJobFilter$JobName": "

Filters on the name of the job.

", "KeyPhrasesDetectionJobProperties$JobName": "

The name that you assigned the key phrases detection job.

", "PiiEntitiesDetectionJobFilter$JobName": "

Filters on the name of the job.

", @@ -1013,6 +1065,7 @@ "StartDocumentClassificationJobRequest$JobName": "

The identifier of the job.

", "StartDominantLanguageDetectionJobRequest$JobName": "

An identifier for the job.

", "StartEntitiesDetectionJobRequest$JobName": "

The identifier of the job.

", + "StartEventsDetectionJobRequest$JobName": "

The identifier of the events detection job.

", "StartKeyPhrasesDetectionJobRequest$JobName": "

The identifier of the job.

", "StartPiiEntitiesDetectionJobRequest$JobName": "

The identifier of the job.

", "StartSentimentDetectionJobRequest$JobName": "

The identifier of the job.

", @@ -1035,6 +1088,8 @@ "DominantLanguageDetectionJobProperties$JobStatus": "

The current status of the dominant language detection job. If the status is FAILED, the Message field shows the reason for the failure.

", "EntitiesDetectionJobFilter$JobStatus": "

Filters the list of jobs based on job status. Returns only jobs with the specified status.

", "EntitiesDetectionJobProperties$JobStatus": "

The current status of the entities detection job. If the status is FAILED, the Message field shows the reason for the failure.

", + "EventsDetectionJobFilter$JobStatus": "

Filters the list of jobs based on job status. Returns only jobs with the specified status.

", + "EventsDetectionJobProperties$JobStatus": "

The current status of the events detection job.

", "KeyPhrasesDetectionJobFilter$JobStatus": "

Filters the list of jobs based on job status. Returns only jobs with the specified status.

", "KeyPhrasesDetectionJobProperties$JobStatus": "

The current status of the key phrases detection job. If the status is FAILED, the Message field shows the reason for the failure.

", "PiiEntitiesDetectionJobFilter$JobStatus": "

Filters the list of jobs based on job status. Returns only jobs with the specified status.

", @@ -1044,12 +1099,14 @@ "StartDocumentClassificationJobResponse$JobStatus": "

The status of the job:

", "StartDominantLanguageDetectionJobResponse$JobStatus": "

The status of the job.

", "StartEntitiesDetectionJobResponse$JobStatus": "

The status of the job.

", + "StartEventsDetectionJobResponse$JobStatus": "

The status of the events detection job.

", "StartKeyPhrasesDetectionJobResponse$JobStatus": "

The status of the job.

", "StartPiiEntitiesDetectionJobResponse$JobStatus": "

The status of the job.

", "StartSentimentDetectionJobResponse$JobStatus": "

The status of the job.

", "StartTopicsDetectionJobResponse$JobStatus": "

The status of the job:

", "StopDominantLanguageDetectionJobResponse$JobStatus": "

Either STOP_REQUESTED if the job is currently running, or STOPPED if the job was previously stopped with the StopDominantLanguageDetectionJob operation.

", "StopEntitiesDetectionJobResponse$JobStatus": "

Either STOP_REQUESTED if the job is currently running, or STOPPED if the job was previously stopped with the StopEntitiesDetectionJob operation.

", + "StopEventsDetectionJobResponse$JobStatus": "

The status of the events detection job.

", "StopKeyPhrasesDetectionJobResponse$JobStatus": "

Either STOP_REQUESTED if the job is currently running, or STOPPED if the job was previously stopped with the StopKeyPhrasesDetectionJob operation.

", "StopPiiEntitiesDetectionJobResponse$JobStatus": "

The status of the PII entities detection job.

", "StopSentimentDetectionJobResponse$JobStatus": "

Either STOP_REQUESTED if the job is currently running, or STOPPED if the job was previously stopped with the StopSentimentDetectionJob operation.

", @@ -1132,10 +1189,12 @@ "DocumentClassifierProperties$LanguageCode": "

The language code for the language of the documents that the classifier was trained on.

", "EntitiesDetectionJobProperties$LanguageCode": "

The language code of the input documents.

", "EntityRecognizerProperties$LanguageCode": "

The language of the input documents. All documents must be in the same language. Only English (\"en\") is currently supported.

", + "EventsDetectionJobProperties$LanguageCode": "

The language code of the input documents.

", "KeyPhrasesDetectionJobProperties$LanguageCode": "

The language code of the input documents.

", "PiiEntitiesDetectionJobProperties$LanguageCode": "

The language code of the input documents

", "SentimentDetectionJobProperties$LanguageCode": "

The language code of the input documents.

", "StartEntitiesDetectionJobRequest$LanguageCode": "

The language of the input documents. All documents must be in the same language. You can specify any of the languages supported by Amazon Comprehend. If custom entities recognition is used, this parameter is ignored and the language used for training the model is used instead.

", + "StartEventsDetectionJobRequest$LanguageCode": "

The language code of the input documents.

", "StartKeyPhrasesDetectionJobRequest$LanguageCode": "

The language of the input documents. You can specify any of the primary languages supported by Amazon Comprehend. All documents must be in the same language.

", "StartPiiEntitiesDetectionJobRequest$LanguageCode": "

The language of the input documents.

", "StartSentimentDetectionJobRequest$LanguageCode": "

The language of the input documents. You can specify any of the primary languages supported by Amazon Comprehend. All documents must be in the same language.

" @@ -1201,6 +1260,16 @@ "refs": { } }, + "ListEventsDetectionJobsRequest": { + "base": null, + "refs": { + } + }, + "ListEventsDetectionJobsResponse": { + "base": null, + "refs": { + } + }, "ListKeyPhrasesDetectionJobsRequest": { "base": null, "refs": { @@ -1271,7 +1340,7 @@ "ListOfLabels": { "base": null, "refs": { - "ClassifyDocumentResponse$Labels": "

The labels used the document being analyzed. These are used for multi-label trained models. Individual labels represent different categories that are related in some manner and are not multually exclusive. For example, a movie can be just an action movie, or it can be an action movie, a science fiction movie, and a comedy, all at the same time.

" + "ClassifyDocumentResponse$Labels": "

The labels used the document being analyzed. These are used for multi-label trained models. Individual labels represent different categories that are related in some manner and are not mutually exclusive. For example, a movie can be just an action movie, or it can be an action movie, a science fiction movie, and a comedy, all at the same time.

" } }, "ListOfPiiEntities": { @@ -1348,6 +1417,7 @@ "ListEndpointsRequest$MaxResults": "

The maximum number of results to return in each page. The default is 100.

", "ListEntitiesDetectionJobsRequest$MaxResults": "

The maximum number of results to return in each page. The default is 100.

", "ListEntityRecognizersRequest$MaxResults": "

The maximum number of results to return on each page. The default is 100.

", + "ListEventsDetectionJobsRequest$MaxResults": "

The maximum number of results to return in each page.

", "ListKeyPhrasesDetectionJobsRequest$MaxResults": "

The maximum number of results to return in each page. The default is 100.

", "ListPiiEntitiesDetectionJobsRequest$MaxResults": "

The maximum number of results to return in each page.

", "ListSentimentDetectionJobsRequest$MaxResults": "

The maximum number of results to return in each page. The default is 100.

", @@ -1375,11 +1445,13 @@ "DocumentClassificationJobProperties$OutputDataConfig": "

The output data configuration that you supplied when you created the document classification job.

", "DominantLanguageDetectionJobProperties$OutputDataConfig": "

The output data configuration that you supplied when you created the dominant language detection job.

", "EntitiesDetectionJobProperties$OutputDataConfig": "

The output data configuration that you supplied when you created the entities detection job.

", + "EventsDetectionJobProperties$OutputDataConfig": "

The output data configuration that you supplied when you created the events detection job.

", "KeyPhrasesDetectionJobProperties$OutputDataConfig": "

The output data configuration that you supplied when you created the key phrases detection job.

", "SentimentDetectionJobProperties$OutputDataConfig": "

The output data configuration that you supplied when you created the sentiment detection job.

", "StartDocumentClassificationJobRequest$OutputDataConfig": "

Specifies where to send the output files.

", "StartDominantLanguageDetectionJobRequest$OutputDataConfig": "

Specifies where to send the output files.

", "StartEntitiesDetectionJobRequest$OutputDataConfig": "

Specifies where to send the output files.

", + "StartEventsDetectionJobRequest$OutputDataConfig": "

Specifies where to send the output files.

", "StartKeyPhrasesDetectionJobRequest$OutputDataConfig": "

Specifies where to send the output files.

", "StartPiiEntitiesDetectionJobRequest$OutputDataConfig": "

Provides configuration parameters for the output of PII entity detection jobs.

", "StartSentimentDetectionJobRequest$OutputDataConfig": "

Specifies where to send the output files.

", @@ -1566,6 +1638,16 @@ "refs": { } }, + "StartEventsDetectionJobRequest": { + "base": null, + "refs": { + } + }, + "StartEventsDetectionJobResponse": { + "base": null, + "refs": { + } + }, "StartKeyPhrasesDetectionJobRequest": { "base": null, "refs": { @@ -1626,6 +1708,16 @@ "refs": { } }, + "StopEventsDetectionJobRequest": { + "base": null, + "refs": { + } + }, + "StopEventsDetectionJobResponse": { + "base": null, + "refs": { + } + }, "StopKeyPhrasesDetectionJobRequest": { "base": null, "refs": { @@ -1706,6 +1798,8 @@ "ListEntitiesDetectionJobsResponse$NextToken": "

Identifies the next page of results to return.

", "ListEntityRecognizersRequest$NextToken": "

Identifies the next page of results to return.

", "ListEntityRecognizersResponse$NextToken": "

Identifies the next page of results to return.

", + "ListEventsDetectionJobsRequest$NextToken": "

Identifies the next page of results to return.

", + "ListEventsDetectionJobsResponse$NextToken": "

Identifies the next page of results to return.

", "ListKeyPhrasesDetectionJobsRequest$NextToken": "

Identifies the next page of results to return.

", "ListKeyPhrasesDetectionJobsResponse$NextToken": "

Identifies the next page of results to return.

", "ListPiiEntitiesDetectionJobsRequest$NextToken": "

Identifies the next page of results to return.

", @@ -1796,6 +1890,13 @@ "Tag$Value": "

The second part of a key-value pair that forms a tag associated with a given resource. For instance, if you want to show which resources are used by which departments, you might use “Department” as the initial (key) portion of the pair, with a value of “sales” to indicate the sales department.

" } }, + "TargetEventTypes": { + "base": null, + "refs": { + "EventsDetectionJobProperties$TargetEventTypes": "

The types of events that are detected by the job.

", + "StartEventsDetectionJobRequest$TargetEventTypes": "

The types of events to detect in the input documents.

" + } + }, "TextSizeLimitExceededException": { "base": "

The size of the input text exceeds the limit. Use a smaller document.

", "refs": { @@ -1832,6 +1933,10 @@ "EntityRecognizerProperties$EndTime": "

The time that the recognizer creation completed.

", "EntityRecognizerProperties$TrainingStartTime": "

The time that training of the entity recognizer started.

", "EntityRecognizerProperties$TrainingEndTime": "

The time that training of the entity recognizer was completed.

", + "EventsDetectionJobFilter$SubmitTimeBefore": "

Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted before the specified time. Jobs are returned in ascending order, oldest to newest.

", + "EventsDetectionJobFilter$SubmitTimeAfter": "

Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted after the specified time. Jobs are returned in descending order, newest to oldest.

", + "EventsDetectionJobProperties$SubmitTime": "

The time that the events detection job was submitted for processing.

", + "EventsDetectionJobProperties$EndTime": "

The time that the events detection job completed.

", "KeyPhrasesDetectionJobFilter$SubmitTimeBefore": "

Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted before the specified time. Jobs are returned in ascending order, oldest to newest.

", "KeyPhrasesDetectionJobFilter$SubmitTimeAfter": "

Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted after the specified time. Jobs are returned in descending order, newest to oldest.

", "KeyPhrasesDetectionJobProperties$SubmitTime": "

The time that the key phrases detection job was submitted for processing.

", @@ -1910,7 +2015,7 @@ } }, "VpcConfig": { - "base": "

Configuration parameters for an optional private Virtual Private Cloud (VPC) containing the resources you are using for the job. For For more information, see Amazon VPC.

", + "base": "

Configuration parameters for an optional private Virtual Private Cloud (VPC) containing the resources you are using for the job. For more information, see Amazon VPC.

", "refs": { "CreateDocumentClassifierRequest$VpcConfig": "

Configuration parameters for an optional private Virtual Private Cloud (VPC) containing the resources you are using for your custom classifier. For more information, see Amazon VPC.

", "CreateEntityRecognizerRequest$VpcConfig": "

Configuration parameters for an optional private Virtual Private Cloud (VPC) containing the resources you are using for your custom entity recognizer. For more information, see Amazon VPC.

", diff --git a/models/apis/comprehend/2017-11-27/paginators-1.json b/models/apis/comprehend/2017-11-27/paginators-1.json index 0a98c5b4e9f..3c7889ffc02 100644 --- a/models/apis/comprehend/2017-11-27/paginators-1.json +++ b/models/apis/comprehend/2017-11-27/paginators-1.json @@ -25,6 +25,11 @@ "output_token": "NextToken", "limit_key": "MaxResults" }, + "ListEventsDetectionJobs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "ListKeyPhrasesDetectionJobs": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/models/apis/elasticbeanstalk/2010-12-01/api-2.json b/models/apis/elasticbeanstalk/2010-12-01/api-2.json index a082cb3d0af..d535ed6bf6d 100644 --- a/models/apis/elasticbeanstalk/2010-12-01/api-2.json +++ b/models/apis/elasticbeanstalk/2010-12-01/api-2.json @@ -1296,7 +1296,7 @@ "EnvironmentId":{"shape":"EnvironmentId"}, "EnvironmentName":{"shape":"EnvironmentName"}, "NextToken":{"shape":"String"}, - "MaxItems":{"shape":"Integer"} + "MaxItems":{"shape":"ManagedActionHistoryMaxItems"} } }, "DescribeEnvironmentManagedActionHistoryResult":{ @@ -1857,6 +1857,11 @@ "max":100, "min":1 }, + "ManagedActionHistoryMaxItems":{ + "type":"integer", + "max":100, + "min":1 + }, "ManagedActionInvalidStateException":{ "type":"structure", "members":{ diff --git a/models/apis/elasticbeanstalk/2010-12-01/docs-2.json b/models/apis/elasticbeanstalk/2010-12-01/docs-2.json index 0943f1d56ac..d4cd9ae25a3 100644 --- a/models/apis/elasticbeanstalk/2010-12-01/docs-2.json +++ b/models/apis/elasticbeanstalk/2010-12-01/docs-2.json @@ -1061,7 +1061,6 @@ "Integer": { "base": null, "refs": { - "DescribeEnvironmentManagedActionHistoryRequest$MaxItems": "

The maximum number of items to return for a single request.

", "Listener$Port": "

The port that is used by the Listener.

" } }, @@ -1202,6 +1201,12 @@ "DescribeEnvironmentManagedActionHistoryResult$ManagedActionHistoryItems": "

A list of completed and failed managed actions.

" } }, + "ManagedActionHistoryMaxItems": { + "base": null, + "refs": { + "DescribeEnvironmentManagedActionHistoryRequest$MaxItems": "

The maximum number of items to return for a single request.

" + } + }, "ManagedActionInvalidStateException": { "base": "

Cannot modify the managed action in its current state.

", "refs": { diff --git a/models/apis/fsx/2018-03-01/api-2.json b/models/apis/fsx/2018-03-01/api-2.json index d099a292171..4807c2c0825 100644 --- a/models/apis/fsx/2018-03-01/api-2.json +++ b/models/apis/fsx/2018-03-01/api-2.json @@ -496,7 +496,8 @@ "CREATING", "TRANSFERRING", "DELETED", - "FAILED" + "FAILED", + "PENDING" ] }, "BackupNotFound":{ diff --git a/models/apis/fsx/2018-03-01/docs-2.json b/models/apis/fsx/2018-03-01/docs-2.json index c587190316d..73f90b2c770 100644 --- a/models/apis/fsx/2018-03-01/docs-2.json +++ b/models/apis/fsx/2018-03-01/docs-2.json @@ -4,8 +4,8 @@ "operations": { "AssociateFileSystemAliases": "

Use this action to associate one or more Domain Name Server (DNS) aliases with an existing Amazon FSx for Windows File Server file system. A file system can have a maximum of 50 DNS aliases associated with it at any one time. If you try to associate a DNS alias that is already associated with the file system, FSx takes no action on that alias in the request. For more information, see Working with DNS Aliases and Walkthrough 5: Using DNS aliases to access your file system, including additional steps you must take to be able to access your file system using a DNS alias.

The system response shows the DNS aliases that Amazon FSx is attempting to associate with the file system. Use the API operation to monitor the status of the aliases Amazon FSx is associating with the file system.

", "CancelDataRepositoryTask": "

Cancels an existing Amazon FSx for Lustre data repository task if that task is in either the PENDING or EXECUTING state. When you cancel a task, Amazon FSx does the following.

", - "CreateBackup": "

Creates a backup of an existing Amazon FSx file system. Creating regular backups for your file system is a best practice, enabling you to restore a file system from a backup if an issue arises with the original file system.

For Amazon FSx for Lustre file systems, you can create a backup only for file systems with the following configuration:

For more information about backing up Amazon FSx for Lustre file systems, see Working with FSx for Lustre backups.

For more information about backing up Amazon FSx for Lustre file systems, see Working with FSx for Windows backups.

If a backup with the specified client request token exists, and the parameters match, this operation returns the description of the existing backup. If a backup specified client request token exists, and the parameters don't match, this operation returns IncompatibleParameterError. If a backup with the specified client request token doesn't exist, CreateBackup does the following:

By using the idempotent operation, you can retry a CreateBackup operation without the risk of creating an extra backup. This approach can be useful when an initial call fails in a way that makes it unclear whether a backup was created. If you use the same client request token and the initial call created a backup, the operation returns a successful result because all the parameters are the same.

The CreateBackup operation returns while the backup's lifecycle state is still CREATING. You can check the backup creation status by calling the DescribeBackups operation, which returns the backup state along with other information.

", - "CreateDataRepositoryTask": "

Creates an Amazon FSx for Lustre data repository task. You use data repository tasks to perform bulk operations between your Amazon FSx file system and its linked data repository. An example of a data repository task is exporting any data and metadata changes, including POSIX metadata, to files, directories, and symbolic links (symlinks) from your FSx file system to its linked data repository. A CreateDataRepositoryTask operation will fail if a data repository is not linked to the FSx file system. To learn more about data repository tasks, see Using Data Repository Tasks. To learn more about linking a data repository to your file system, see Setting the Export Prefix.

", + "CreateBackup": "

Creates a backup of an existing Amazon FSx file system. Creating regular backups for your file system is a best practice, enabling you to restore a file system from a backup if an issue arises with the original file system.

For Amazon FSx for Lustre file systems, you can create a backup only for file systems with the following configuration:

For more information about backing up Amazon FSx for Lustre file systems, see Working with FSx for Lustre backups.

For more information about backing up Amazon FSx for Windows file systems, see Working with FSx for Windows backups.

If a backup with the specified client request token exists, and the parameters match, this operation returns the description of the existing backup. If a backup with the specified client request token exists, and the parameters don't match, this operation returns IncompatibleParameterError. If a backup with the specified client request token doesn't exist, CreateBackup does the following:

By using the idempotent operation, you can retry a CreateBackup operation without the risk of creating an extra backup. This approach can be useful when an initial call fails in a way that makes it unclear whether a backup was created. If you use the same client request token and the initial call created a backup, the operation returns a successful result because all the parameters are the same.

The CreateBackup operation returns while the backup's lifecycle state is still CREATING. You can check the backup creation status by calling the DescribeBackups operation, which returns the backup state along with other information.

", + "CreateDataRepositoryTask": "

Creates an Amazon FSx for Lustre data repository task. You use data repository tasks to perform bulk operations between your Amazon FSx file system and its linked data repository. An example of a data repository task is exporting any data and metadata changes, including POSIX metadata, to files, directories, and symbolic links (symlinks) from your FSx file system to its linked data repository. A CreateDataRepositoryTask operation will fail if a data repository is not linked to the FSx file system. To learn more about data repository tasks, see Data Repository Tasks. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket.

", "CreateFileSystem": "

Creates a new, empty Amazon FSx file system.

If a file system with the specified client request token exists and the parameters match, CreateFileSystem returns the description of the existing file system. If a file system with the specified client request token exists and the parameters don't match, this call returns IncompatibleParameterError. If a file system with the specified client request token doesn't exist, CreateFileSystem does the following:

This operation requires a client request token in the request that Amazon FSx uses to ensure idempotent creation. This means that calling the operation multiple times with the same client request token has no effect. By using the idempotent operation, you can retry a CreateFileSystem operation without the risk of creating an extra file system. This approach can be useful when an initial call fails in a way that makes it unclear whether a file system was created. Examples are if a transport level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a file system, the client receives success as long as the parameters are the same.

The CreateFileSystem call returns while the file system's lifecycle state is still CREATING. You can check the file-system creation status by calling the DescribeFileSystems operation, which returns the file system state along with other information.

", "CreateFileSystemFromBackup": "

Creates a new Amazon FSx file system from an existing Amazon FSx backup.

If a file system with the specified client request token exists and the parameters match, this operation returns the description of the file system. If a client request token specified by the file system exists and the parameters don't match, this call returns IncompatibleParameterError. If a file system with the specified client request token doesn't exist, this operation does the following:

Parameters like Active Directory, default share name, automatic backup, and backup settings default to the parameters of the file system that was backed up, unless overridden. You can explicitly supply other settings.

By using the idempotent operation, you can retry a CreateFileSystemFromBackup call without the risk of creating an extra file system. This approach can be useful when an initial call fails in a way that makes it unclear whether a file system was created. Examples are if a transport level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a file system, the client receives success as long as the parameters are the same.

The CreateFileSystemFromBackup call returns while the file system's lifecycle state is still CREATING. You can check the file-system creation status by calling the DescribeFileSystems operation, which returns the file system state along with other information.

", "DeleteBackup": "

Deletes an Amazon FSx backup, deleting its contents. After deletion, the backup no longer exists, and its data is gone.

The DeleteBackup call returns instantly. The backup will not show up in later DescribeBackups calls.

The data in a deleted backup is also deleted and can't be recovered by any means.

", @@ -18,7 +18,7 @@ "ListTagsForResource": "

Lists tags for an Amazon FSx file systems and backups in the case of Amazon FSx for Windows File Server.

When retrieving all tags, you can optionally specify the MaxResults parameter to limit the number of tags in a response. If more tags remain, Amazon FSx returns a NextToken value in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.

This action is used in an iterative process to retrieve a list of your tags. ListTagsForResource is called first without a NextToken value. Then the action continues to be called with the NextToken parameter set to the value of the last NextToken value until a response has no NextToken.

When using this action, keep the following in mind:

", "TagResource": "

Tags an Amazon FSx resource.

", "UntagResource": "

This action removes a tag from an Amazon FSx resource.

", - "UpdateFileSystem": "

Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request.

For Amazon FSx for Windows File Server file systems, you can update the following properties:

For Amazon FSx for Lustre file systems, you can update the following properties:

" + "UpdateFileSystem": "

Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request.

For Amazon FSx for Windows File Server file systems, you can update the following properties:

For Amazon FSx for Lustre file systems, you can update the following properties:

" }, "shapes": { "AWSAccountId": { @@ -53,7 +53,7 @@ } }, "AdministrativeAction": { - "base": "

Describes a specific Amazon FSx Administrative Action for the current Windows file system.

", + "base": "

Describes a specific Amazon FSx administrative action for the current Windows or Lustre file system.

", "refs": { "AdministrativeActions$member": null } @@ -65,7 +65,7 @@ } }, "AdministrativeActionType": { - "base": "

Describes the type of administrative action, as follows:

", + "base": "

Describes the type of administrative action, as follows:

", "refs": { "AdministrativeAction$AdministrativeActionType": null } @@ -89,7 +89,7 @@ } }, "Aliases": { - "base": "

An array of one or more DNS aliases that are currently associated with the Amazon FSx file system. Aliases allow you to use existing DNS names to access the data in your Amazon FSx file system. You can associate up to 50 aliases with a file system at any time. You can associate additional DNS aliases after you create the file system using the AssociateFileSystemAliases operation. You can remove DNS aliases from the file system after it is created using the DisassociateFileSystemAliases operation. You only need to specify the alias name in the request payload. For more information, see DNS aliases.

", + "base": "

An array of one or more DNS aliases that are currently associated with the Amazon FSx file system. Aliases allow you to use existing DNS names to access the data in your Amazon FSx file system. You can associate up to 50 aliases with a file system at any time. You can associate additional DNS aliases after you create the file system using the AssociateFileSystemAliases operation. You can remove DNS aliases from the file system after it is created using the DisassociateFileSystemAliases operation. You only need to specify the alias name in the request payload. For more information, see DNS aliases.

", "refs": { "AssociateFileSystemAliasesResponse$Aliases": "

An array of the DNS aliases that Amazon FSx is associating with the file system.

", "DescribeFileSystemAliasesResponse$Aliases": "

An array of one or more DNS aliases currently associated with the specified file system.

", @@ -108,7 +108,7 @@ "base": null, "refs": { "AssociateFileSystemAliasesRequest$Aliases": "

An array of one or more DNS alias names to associate with the file system. The alias name has to comply with the following formatting requirements:

For DNS alias names, Amazon FSx stores alphabetic characters as lowercase letters (a-z), regardless of how you specify them: as uppercase letters, lowercase letters, or the corresponding letters in escape codes.

", - "CreateFileSystemWindowsConfiguration$Aliases": "

An array of one or more DNS alias names that you want to associate with the Amazon FSx file system. Aliases allow you to use existing DNS names to access the data in your Amazon FSx file system. You can associate up to 50 aliases with a file system at any time. You can associate additional DNS aliases after you create the file system using the AssociateFileSystemAliases operation. You can remove DNS aliases from the file system after it is created using the DisassociateFileSystemAliases operation. You only need to specify the alias name in the request payload.

For more information, see Working with DNS Aliases and Walkthrough 5: Using DNS aliases to access your file system, including additional steps you must take to be able to access your file system using a DNS alias.

An alias name has to meet the following requirements:

For DNS alias names, Amazon FSx stores alphabetic characters as lowercase letters (a-z), regardless of how you specify them: as uppercase letters, lowercase letters, or the corresponding letters in escape codes.

", + "CreateFileSystemWindowsConfiguration$Aliases": "

An array of one or more DNS alias names that you want to associate with the Amazon FSx file system. Aliases allow you to use existing DNS names to access the data in your Amazon FSx file system. You can associate up to 50 aliases with a file system at any time. You can associate additional DNS aliases after you create the file system using the AssociateFileSystemAliases operation. You can remove DNS aliases from the file system after it is created using the DisassociateFileSystemAliases operation. You only need to specify the alias name in the request payload.

For more information, see Working with DNS Aliases and Walkthrough 5: Using DNS aliases to access your file system, including additional steps you must take to be able to access your file system using a DNS alias.

An alias name has to meet the following requirements:

For DNS alias names, Amazon FSx stores alphabetic characters as lowercase letters (a-z), regardless of how you specify them: as uppercase letters, lowercase letters, or the corresponding letters in escape codes.

", "DisassociateFileSystemAliasesRequest$Aliases": "

An array of one or more DNS alias names to disassociate, or remove, from the file system.

" } }, @@ -152,7 +152,7 @@ } }, "Backup": { - "base": "

A backup of an Amazon FSx for file system.

", + "base": "

A backup of an Amazon FSx file system. For more information see:

", "refs": { "Backups$member": null, "CreateBackupResponse$Backup": "

A description of the backup.

" @@ -188,9 +188,9 @@ } }, "BackupLifecycle": { - "base": "

The lifecycle status of the backup.

", + "base": "

The lifecycle status of the backup.

", "refs": { - "Backup$Lifecycle": "

The lifecycle status of the backup.

", + "Backup$Lifecycle": "

The lifecycle status of the backup.

", "DeleteBackupResponse$Lifecycle": "

The lifecycle of the backup. Should be DELETED.

" } }, @@ -1013,15 +1013,15 @@ "Status": { "base": null, "refs": { - "AdministrativeAction$Status": "

Describes the status of the administrative action, as follows:

" + "AdministrativeAction$Status": "

Describes the status of the administrative action, as follows:

" } }, "StorageCapacity": { "base": "

The storage capacity for your Amazon FSx file system, in gibibytes.

", "refs": { "CreateFileSystemRequest$StorageCapacity": "

Sets the storage capacity of the file system that you're creating.

For Lustre file systems:

For Windows file systems:

", - "FileSystem$StorageCapacity": "

The storage capacity of the file system in gigabytes (GB).

", - "UpdateFileSystemRequest$StorageCapacity": "

Use this parameter to increase the storage capacity of an Amazon FSx for Windows File Server file system. Specifies the storage capacity target value, GiB, for the file system you're updating. The storage capacity target value must be at least 10 percent (%) greater than the current storage capacity value. In order to increase storage capacity, the file system needs to have at least 16 MB/s of throughput capacity. You cannot make a storage capacity increase request if there is an existing storage capacity increase request in progress. For more information, see Managing Storage Capacity.

" + "FileSystem$StorageCapacity": "

The storage capacity of the file system in gibibytes (GiB).

", + "UpdateFileSystemRequest$StorageCapacity": "

Use this parameter to increase the storage capacity of an Amazon FSx file system. Specifies the storage capacity target value, GiB, to increase the storage capacity for the file system that you're updating. You cannot make a storage capacity increase request if there is an existing storage capacity increase request in progress.

For Windows file systems, the storage capacity target value must be at least 10 percent (%) greater than the current storage capacity value. In order to increase storage capacity, the file system must have at least 16 MB/s of throughput capacity.

For Lustre file systems, the storage capacity target value can be the following:

For more information, see Managing storage capacity in the Amazon FSx for Windows File Server User Guide and Managing storage and throughput capacity in the Amazon FSx for Lustre User Guide.

" } }, "StorageType": { diff --git a/models/apis/gamelift/2015-10-01/api-2.json b/models/apis/gamelift/2015-10-01/api-2.json index 66a58148121..9beba43ca96 100644 --- a/models/apis/gamelift/2015-10-01/api-2.json +++ b/models/apis/gamelift/2015-10-01/api-2.json @@ -1727,7 +1727,6 @@ "type":"structure", "required":[ "Name", - "GameSessionQueueArns", "RequestTimeoutSeconds", "AcceptanceRequired", "RuleSetName" @@ -1746,6 +1745,7 @@ "GameProperties":{"shape":"GamePropertyList"}, "GameSessionData":{"shape":"GameSessionData"}, "BackfillMode":{"shape":"BackfillMode"}, + "FlexMatchMode":{"shape":"FlexMatchMode"}, "Tags":{"shape":"TagList"} } }, @@ -2432,6 +2432,14 @@ "c5.12xlarge", "c5.18xlarge", "c5.24xlarge", + "c5a.large", + "c5a.xlarge", + "c5a.2xlarge", + "c5a.4xlarge", + "c5a.8xlarge", + "c5a.12xlarge", + "c5a.16xlarge", + "c5a.24xlarge", "r3.large", "r3.xlarge", "r3.2xlarge", @@ -2451,6 +2459,14 @@ "r5.12xlarge", "r5.16xlarge", "r5.24xlarge", + "r5a.large", + "r5a.xlarge", + "r5a.2xlarge", + "r5a.4xlarge", + "r5a.8xlarge", + "r5a.12xlarge", + "r5a.16xlarge", + "r5a.24xlarge", "m3.medium", "m3.large", "m3.xlarge", @@ -2467,7 +2483,15 @@ "m5.8xlarge", "m5.12xlarge", "m5.16xlarge", - "m5.24xlarge" + "m5.24xlarge", + "m5a.large", + "m5a.xlarge", + "m5a.2xlarge", + "m5a.4xlarge", + "m5a.8xlarge", + "m5a.12xlarge", + "m5a.16xlarge", + "m5a.24xlarge" ] }, "Event":{ @@ -2641,6 +2665,13 @@ "type":"list", "member":{"shape":"FleetUtilization"} }, + "FlexMatchMode":{ + "type":"string", + "enum":[ + "STANDALONE", + "WITH_QUEUE" + ] + }, "Float":{"type":"float"}, "FreeText":{"type":"string"}, "GameProperty":{ @@ -3390,7 +3421,8 @@ "CreationTime":{"shape":"Timestamp"}, "GameProperties":{"shape":"GamePropertyList"}, "GameSessionData":{"shape":"GameSessionData"}, - "BackfillMode":{"shape":"BackfillMode"} + "BackfillMode":{"shape":"BackfillMode"}, + "FlexMatchMode":{"shape":"FlexMatchMode"} } }, "MatchmakingConfigurationArn":{ @@ -4006,7 +4038,6 @@ "type":"structure", "required":[ 
"ConfigurationName", - "GameSessionArn", "Players" ], "members":{ @@ -4382,7 +4413,8 @@ "CustomEventData":{"shape":"CustomEventData"}, "GameProperties":{"shape":"GamePropertyList"}, "GameSessionData":{"shape":"GameSessionData"}, - "BackfillMode":{"shape":"BackfillMode"} + "BackfillMode":{"shape":"BackfillMode"}, + "FlexMatchMode":{"shape":"FlexMatchMode"} } }, "UpdateMatchmakingConfigurationOutput":{ diff --git a/models/apis/gamelift/2015-10-01/docs-2.json b/models/apis/gamelift/2015-10-01/docs-2.json index f157d569a37..33736564132 100644 --- a/models/apis/gamelift/2015-10-01/docs-2.json +++ b/models/apis/gamelift/2015-10-01/docs-2.json @@ -2,7 +2,7 @@ "version": "2.0", "service": "Amazon GameLift Service

GameLift provides solutions for hosting session-based multiplayer game servers in the cloud, including tools for deploying, operating, and scaling game servers. Built on AWS global computing infrastructure, GameLift helps you deliver high-performance, high-reliability, low-cost game servers while dynamically scaling your resource usage to meet player demand.

About GameLift solutions

Get more information on these GameLift solutions in the Amazon GameLift Developer Guide.

About this API Reference

This reference guide describes the low-level service API for Amazon GameLift. You can find links to language-specific SDK guides and the AWS CLI reference with each operation and data type topic. Useful links:

", "operations": { - "AcceptMatch": "

Registers a player's acceptance or rejection of a proposed FlexMatch match. A matchmaking configuration may require player acceptance; if so, then matches built with that configuration cannot be completed unless all players accept the proposed match within a specified time limit.

When FlexMatch builds a match, all the matchmaking tickets involved in the proposed match are placed into status REQUIRES_ACCEPTANCE. This is a trigger for your game to get acceptance from all players in the ticket. Acceptances are only valid for tickets when they are in this status; all other acceptances result in an error.

To register acceptance, specify the ticket ID, a response, and one or more players. Once all players have registered acceptance, the matchmaking tickets advance to status PLACING, where a new game session is created for the match.

If any player rejects the match, or if acceptances are not received before a specified timeout, the proposed match is dropped. The matchmaking tickets are then handled in one of two ways: For tickets where one or more players rejected the match, the ticket status is returned to SEARCHING to find a new match. For tickets where one or more players failed to respond, the ticket status is set to CANCELLED, and processing is terminated. A new matchmaking request for these players can be submitted as needed.

Learn more

Add FlexMatch to a Game Client

FlexMatch Events Reference

Related operations

", + "AcceptMatch": "

Registers a player's acceptance or rejection of a proposed FlexMatch match. A matchmaking configuration may require player acceptance; if so, then matches built with that configuration cannot be completed unless all players accept the proposed match within a specified time limit.

When FlexMatch builds a match, all the matchmaking tickets involved in the proposed match are placed into status REQUIRES_ACCEPTANCE. This is a trigger for your game to get acceptance from all players in the ticket. Acceptances are only valid for tickets when they are in this status; all other acceptances result in an error.

To register acceptance, specify the ticket ID, a response, and one or more players. Once all players have registered acceptance, the matchmaking tickets advance to status PLACING, where a new game session is created for the match.

If any player rejects the match, or if acceptances are not received before a specified timeout, the proposed match is dropped. The matchmaking tickets are then handled in one of two ways: For tickets where one or more players rejected the match, the ticket status is returned to SEARCHING to find a new match. For tickets where one or more players failed to respond, the ticket status is set to CANCELLED, and processing is terminated. A new matchmaking request for these players can be submitted as needed.

Learn more

Add FlexMatch to a Game Client

FlexMatch Events Reference

Related operations

", "ClaimGameServer": "

This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

Locates an available game server and temporarily reserves it to host gameplay and players. This operation is called from a game client or client service (such as a matchmaker) to request hosting resources for a new game session. In response, GameLift FleetIQ locates an available game server, places it in CLAIMED status for 60 seconds, and returns connection information that players can use to connect to the game server.

To claim a game server, identify a game server group. You can also specify a game server ID, although this approach bypasses GameLift FleetIQ placement optimization. Optionally, include game data to pass to the game server at the start of a game session, such as a game map or player information.

When a game server is successfully claimed, connection information is returned. A claimed game server's utilization status remains AVAILABLE while the claim status is set to CLAIMED for up to 60 seconds. This time period gives the game server time to update its status to UTILIZED (using UpdateGameServer) once players join. If the game server's status is not updated within 60 seconds, the game server reverts to unclaimed status and is available to be claimed by another request. The claim time period is a fixed value and is not configurable.

If you try to claim a specific game server, this request will fail in the following cases:

When claiming a specific game server, this request will succeed even if the game server is running on an instance in DRAINING status. To avoid this, first check the instance status by calling DescribeGameServerInstances.

Learn more

GameLift FleetIQ Guide

Related operations

", "CreateAlias": "

Creates an alias for a fleet. In most situations, you can use an alias ID in place of a fleet ID. An alias provides a level of abstraction for a fleet that is useful when redirecting player traffic from one fleet to another, such as when updating your game build.

Amazon GameLift supports two types of routing strategies for aliases: simple and terminal. A simple alias points to an active fleet. A terminal alias is used to display messaging or link to a URL instead of routing players to an active fleet. For example, you might use a terminal alias when a game version is no longer supported and you want to direct players to an upgrade site.

To create a fleet alias, specify an alias name, routing strategy, and optional description. Each simple alias can point to only one fleet, but a fleet can have multiple aliases. If successful, a new alias record is returned, including an alias ID and an ARN. You can reassign an alias to another fleet by calling UpdateAlias.

", "CreateBuild": "

Creates a new Amazon GameLift build resource for your game server binary files. Game server binaries must be combined into a zip file for use with Amazon GameLift.

When setting up a new game build for GameLift, we recommend using the AWS CLI command upload-build . This helper command combines two tasks: (1) it uploads your build files from a file directory to a GameLift Amazon S3 location, and (2) it creates a new build resource.

The CreateBuild operation can used in the following scenarios:

If successful, this operation creates a new build resource with a unique build ID and places it in INITIALIZED status. A build must be in READY status before you can create fleets with it.

Learn more

Uploading Your Game

Create a Build with Files in Amazon S3

Related operations

", @@ -10,8 +10,8 @@ "CreateGameServerGroup": "

This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

Creates a GameLift FleetIQ game server group for managing game hosting on a collection of Amazon EC2 instances for game hosting. This operation creates the game server group, creates an Auto Scaling group in your AWS account, and establishes a link between the two groups. You can view the status of your game server groups in the GameLift console. Game server group metrics and events are emitted to Amazon CloudWatch.

Before creating a new game server group, you must have the following:

To create a new game server group, specify a unique group name, IAM role and Amazon EC2 launch template, and provide a list of instance types that can be used in the group. You must also set initial maximum and minimum limits on the group's instance count. You can optionally set an Auto Scaling policy with target tracking based on a GameLift FleetIQ metric.

Once the game server group and corresponding Auto Scaling group are created, you have full access to change the Auto Scaling group's configuration as needed. Several properties that are set when creating a game server group, including maximum/minimum size and auto-scaling policy settings, must be updated directly in the Auto Scaling group. Keep in mind that some Auto Scaling group properties are periodically updated by GameLift FleetIQ as part of its balancing activities to optimize for availability and cost.

Learn more

GameLift FleetIQ Guide

Related operations

", "CreateGameSession": "

Creates a multiplayer game session for players. This operation creates a game session record and assigns an available server process in the specified fleet to host the game session. A fleet must have an ACTIVE status before a game session can be created in it.

To create a game session, specify either fleet ID or alias ID and indicate a maximum number of players to allow in the game session. You can also provide a name and game-specific properties for this game session. If successful, a GameSession object is returned containing the game session properties and other settings you specified.

Idempotency tokens. You can add a token that uniquely identifies game session requests. This is useful for ensuring that game session requests are idempotent. Multiple requests with the same idempotency token are processed only once; subsequent requests return the original result. All response values are the same with the exception of game session status, which may change.

Resource creation limits. If you are creating a game session on a fleet with a resource creation limit policy in force, then you must specify a creator ID. Without this ID, Amazon GameLift has no way to evaluate the policy for this new game session request.

Player acceptance policy. By default, newly created game sessions are open to new players. You can restrict new player access by using UpdateGameSession to change the game session's player session creation policy.

Game session logs. Logs are retained for all active game sessions for 14 days. To access the logs, call GetGameSessionLogUrl to download the log files.

Available in Amazon GameLift Local.

", "CreateGameSessionQueue": "

Establishes a new queue for processing requests to place new game sessions. A queue identifies where new game sessions can be hosted -- by specifying a list of destinations (fleets or aliases) -- and how long requests can wait in the queue before timing out. You can set up a queue to try to place game sessions on fleets in multiple Regions. To add placement requests to a queue, call StartGameSessionPlacement and reference the queue name.

Destination order. When processing a request for a game session, Amazon GameLift tries each destination in order until it finds one with available resources to host the new game session. A queue's default order is determined by how destinations are listed. The default order is overridden when a game session placement request provides player latency information. Player latency information enables Amazon GameLift to prioritize destinations where players report the lowest average latency, as a result placing the new game session where the majority of players will have the best possible gameplay experience.

Player latency policies. For placement requests containing player latency information, use player latency policies to protect individual players from very high latencies. With a latency cap, even when a destination can deliver a low latency for most players, the game is not placed where any individual player is reporting latency higher than a policy's maximum. A queue can have multiple latency policies, which are enforced consecutively starting with the policy with the lowest latency cap. Use multiple policies to gradually relax latency controls; for example, you might set a policy with a low latency cap for the first 60 seconds, a second policy with a higher cap for the next 60 seconds, etc.

To create a new queue, provide a name, timeout value, a list of destinations and, if desired, a set of latency policies. If successful, a new queue object is returned.

Learn more

Design a Game Session Queue

Create a Game Session Queue

Related operations

", - "CreateMatchmakingConfiguration": "

Defines a new matchmaking configuration for use with FlexMatch. A matchmaking configuration sets out guidelines for matching players and getting the matches into games. You can set up multiple matchmaking configurations to handle the scenarios needed for your game. Each matchmaking ticket (StartMatchmaking or StartMatchBackfill) specifies a configuration for the match and provides player attributes to support the configuration being used.

To create a matchmaking configuration, at a minimum you must specify the following: configuration name; a rule set that governs how to evaluate players and find acceptable matches; a game session queue to use when placing a new game session for the match; and the maximum time allowed for a matchmaking attempt.

To track the progress of matchmaking tickets, set up an Amazon Simple Notification Service (SNS) to receive notifications, and provide the topic ARN in the matchmaking configuration. An alternative method, continuously poling ticket status with DescribeMatchmaking, should only be used for games in development with low matchmaking usage.

Learn more

Design a FlexMatch Matchmaker

Set Up FlexMatch Event Notification

Related operations

", - "CreateMatchmakingRuleSet": "

Creates a new rule set for FlexMatch matchmaking. A rule set describes the type of match to create, such as the number and size of teams. It also sets the parameters for acceptable player matches, such as minimum skill level or character type. A rule set is used by a MatchmakingConfiguration.

To create a matchmaking rule set, provide unique rule set name and the rule set body in JSON format. Rule sets must be defined in the same Region as the matchmaking configuration they are used with.

Since matchmaking rule sets cannot be edited, it is a good idea to check the rule set syntax using ValidateMatchmakingRuleSet before creating a new rule set.

Learn more

Related operations

", + "CreateMatchmakingConfiguration": "

Defines a new matchmaking configuration for use with FlexMatch. Whether your are using FlexMatch with GameLift hosting or as a standalone matchmaking service, the matchmaking configuration sets out rules for matching players and forming teams. If you're also using GameLift hosting, it defines how to start game sessions for each match. Your matchmaking system can use multiple configurations to handle different game scenarios. All matchmaking requests (StartMatchmaking or StartMatchBackfill) identify the matchmaking configuration to use and provide player attributes consistent with that configuration.

 
To create a matchmaking configuration, you must provide the following: configuration name and FlexMatch mode (with or without GameLift hosting); a rule set that specifies how to evaluate players and find acceptable matches; whether player acceptance is required; and the maximum time allowed for a matchmaking attempt. When using FlexMatch with GameLift hosting, you also need to identify the game session queue to use when starting a game session for the match.

In addition, you must set up an Amazon Simple Notification Service (SNS) to receive matchmaking notifications, and provide the topic ARN in the matchmaking configuration. An alternative method, continuously polling ticket status with DescribeMatchmaking, is only suitable for games in development with low matchmaking usage.

Learn more

FlexMatch Developer Guide

Design a FlexMatch Matchmaker

Set Up FlexMatch Event Notification

Related operations

", + "CreateMatchmakingRuleSet": "

Creates a new rule set for FlexMatch matchmaking. A rule set describes the type of match to create, such as the number and size of teams. It also sets the parameters for acceptable player matches, such as minimum skill level or character type. A rule set is used by a MatchmakingConfiguration.

To create a matchmaking rule set, provide unique rule set name and the rule set body in JSON format. Rule sets must be defined in the same Region as the matchmaking configuration they are used with.

Since matchmaking rule sets cannot be edited, it is a good idea to check the rule set syntax using ValidateMatchmakingRuleSet before creating a new rule set.

Learn more

Related operations

", "CreatePlayerSession": "

Reserves an open player slot in an active game session. Before a player can be added, a game session must have an ACTIVE status, have a creation policy of ALLOW_ALL, and have an open player slot. To add a group of players to a game session, use CreatePlayerSessions. When the player connects to the game server and references a player session ID, the game server contacts the Amazon GameLift service to validate the player reservation and accept the player.

To create a player session, specify a game session ID, player ID, and optionally a string of player data. If successful, a slot is reserved in the game session for the player and a new PlayerSession object is returned. Player sessions cannot be updated.

Available in Amazon GameLift Local.

", "CreatePlayerSessions": "

Reserves open slots in a game session for a group of players. Before players can be added, a game session must have an ACTIVE status, have a creation policy of ALLOW_ALL, and have an open player slot. To add a single player to a game session, use CreatePlayerSession. When a player connects to the game server and references a player session ID, the game server contacts the Amazon GameLift service to validate the player reservation and accept the player.

To create player sessions, specify a game session ID, a list of player IDs, and optionally a set of player data strings. If successful, a slot is reserved in the game session for each player and a set of new PlayerSession objects is returned. Player sessions cannot be updated.

Available in Amazon GameLift Local.

", "CreateScript": "

Creates a new script record for your Realtime Servers script. Realtime scripts are JavaScript that provide configuration settings and optional custom game logic for your game. The script is deployed when you create a Realtime Servers fleet to host your game sessions. Script logic is executed during an active game session.

To create a new script record, specify a script name and provide the script file(s). The script files and all dependencies must be zipped into a single file. You can pull the zip file from either of these locations:

If the call is successful, a new script record is created with a unique script ID. If the script file is provided as a local file, the file is uploaded to an Amazon GameLift-owned S3 bucket and the script record's storage location reflects this location. If the script file is provided as an S3 bucket, Amazon GameLift accesses the file at this storage location as needed for deployment.

Learn more

Amazon GameLift Realtime Servers

Set Up a Role for Amazon GameLift Access

Related operations

", @@ -23,7 +23,7 @@ "DeleteGameServerGroup": "

This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

Terminates a game server group and permanently deletes the game server group record. You have several options for how these resources are impacted when deleting the game server group. Depending on the type of delete operation selected, this operation might affect these resources:

To delete a game server group, identify the game server group to delete and specify the type of delete operation to initiate. Game server groups can only be deleted if they are in ACTIVE or ERROR status.

If the delete request is successful, a series of operations are kicked off. The game server group status is changed to DELETE_SCHEDULED, which prevents new game servers from being registered and stops automatic scaling activity. Once all game servers in the game server group are deregistered, GameLift FleetIQ can begin deleting resources. If any of the delete operations fail, the game server group is placed in ERROR status.

GameLift FleetIQ emits delete events to Amazon CloudWatch.

Learn more

GameLift FleetIQ Guide

Related operations

", "DeleteGameSessionQueue": "

Deletes a game session queue. Once a queue is successfully deleted, unfulfilled StartGameSessionPlacement requests that reference the queue will fail. To delete a queue, specify the queue name.

Learn more

Using Multi-Region Queues

Related operations

", "DeleteMatchmakingConfiguration": "

Permanently removes a FlexMatch matchmaking configuration. To delete, specify the configuration name. A matchmaking configuration cannot be deleted if it is being used in any active matchmaking tickets.

Related operations

", - "DeleteMatchmakingRuleSet": "

Deletes an existing matchmaking rule set. To delete the rule set, provide the rule set name. Rule sets cannot be deleted if they are currently being used by a matchmaking configuration.

Learn more

Related operations

", + "DeleteMatchmakingRuleSet": "

Deletes an existing matchmaking rule set. To delete the rule set, provide the rule set name. Rule sets cannot be deleted if they are currently being used by a matchmaking configuration.

Learn more

Related operations

", "DeleteScalingPolicy": "

Deletes a fleet scaling policy. Once deleted, the policy is no longer in force and GameLift removes all record of it. To delete a scaling policy, specify both the scaling policy name and the fleet ID it is associated with.

To temporarily suspend scaling policies, call StopFleetActions. This operation suspends all policies for the fleet.

", "DeleteScript": "

Deletes a Realtime script. This operation permanently deletes the script record. If script files were uploaded, they are also deleted (files stored in an S3 bucket are not deleted).

To delete a script, specify the script ID. Before deleting a script, be sure to terminate all fleets that are deployed with the script being deleted. Fleet instances periodically check for script updates, and if the script record no longer exists, the instance will go into an error state and be unable to host game sessions.

Learn more

Amazon GameLift Realtime Servers

Related operations

", "DeleteVpcPeeringAuthorization": "

Cancels a pending VPC peering authorization for the specified VPC. If you need to delete an existing VPC peering connection, call DeleteVpcPeeringConnection.

", @@ -45,9 +45,9 @@ "DescribeGameSessionQueues": "

Retrieves the properties for one or more game session queues. When requesting multiple queues, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a GameSessionQueue object is returned for each requested queue. When specifying a list of queues, objects are returned only for queues that currently exist in the Region.

Learn more

View Your Queues

Related operations

", "DescribeGameSessions": "

Retrieves a set of one or more game sessions. Request a specific game session or request all game sessions on a fleet. Alternatively, use SearchGameSessions to request a set of active game sessions that are filtered by certain criteria. To retrieve protection policy settings for game sessions, use DescribeGameSessionDetails.

To get game sessions, specify one of the following: game session ID, fleet ID, or alias ID. You can filter this request by game session status. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a GameSession object is returned for each game session matching the request.

Available in Amazon GameLift Local.

", "DescribeInstances": "

Retrieves information about a fleet's instances, including instance IDs. Use this operation to get details on all instances in the fleet or get details on one specific instance.

To get a specific instance, specify fleet ID and instance ID. To get all instances in a fleet, specify a fleet ID only. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, an Instance object is returned for each result.

Learn more

Remotely Access Fleet Instances

Debug Fleet Issues

Related operations

", - "DescribeMatchmaking": "

Retrieves one or more matchmaking tickets. Use this operation to retrieve ticket information, including--after a successful match is made--connection information for the resulting new game session.

To request matchmaking tickets, provide a list of up to 10 ticket IDs. If the request is successful, a ticket object is returned for each requested ID that currently exists.

This operation is not designed to be continually called to track matchmaking ticket status. This practice can cause you to exceed your API limit, which results in errors. Instead, as a best practice, set up an Amazon Simple Notification Service (SNS) to receive notifications, and provide the topic ARN in the matchmaking configuration. Continuously poling ticket status with DescribeMatchmaking should only be used for games in development with low matchmaking usage.

Learn more

Add FlexMatch to a Game Client

Set Up FlexMatch Event Notification

Related operations

", - "DescribeMatchmakingConfigurations": "

Retrieves the details of FlexMatch matchmaking configurations.

This operation offers the following options: (1) retrieve all matchmaking configurations, (2) retrieve configurations for a specified list, or (3) retrieve all configurations that use a specified rule set name. When requesting multiple items, use the pagination parameters to retrieve results as a set of sequential pages.

If successful, a configuration is returned for each requested name. When specifying a list of names, only configurations that currently exist are returned.

Learn more

Setting Up FlexMatch Matchmakers

Related operations

", - "DescribeMatchmakingRuleSets": "

Retrieves the details for FlexMatch matchmaking rule sets. You can request all existing rule sets for the Region, or provide a list of one or more rule set names. When requesting multiple items, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a rule set is returned for each requested name.

Learn more

Related operations

", + "DescribeMatchmaking": "

Retrieves one or more matchmaking tickets. Use this operation to retrieve ticket information, including--after a successful match is made--connection information for the resulting new game session.

To request matchmaking tickets, provide a list of up to 10 ticket IDs. If the request is successful, a ticket object is returned for each requested ID that currently exists.

This operation is not designed to be continually called to track matchmaking ticket status. This practice can cause you to exceed your API limit, which results in errors. Instead, as a best practice, set up an Amazon Simple Notification Service (SNS) to receive notifications, and provide the topic ARN in the matchmaking configuration. Continuously poling ticket status with DescribeMatchmaking should only be used for games in development with low matchmaking usage.

Learn more

Add FlexMatch to a Game Client

Set Up FlexMatch Event Notification

Related operations

", + "DescribeMatchmakingConfigurations": "

Retrieves the details of FlexMatch matchmaking configurations.

This operation offers the following options: (1) retrieve all matchmaking configurations, (2) retrieve configurations for a specified list, or (3) retrieve all configurations that use a specified rule set name. When requesting multiple items, use the pagination parameters to retrieve results as a set of sequential pages.

If successful, a configuration is returned for each requested name. When specifying a list of names, only configurations that currently exist are returned.

Learn more

Setting Up FlexMatch Matchmakers

Related operations

", + "DescribeMatchmakingRuleSets": "

Retrieves the details for FlexMatch matchmaking rule sets. You can request all existing rule sets for the Region, or provide a list of one or more rule set names. When requesting multiple items, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a rule set is returned for each requested name.

Learn more

Related operations

", "DescribePlayerSessions": "

Retrieves properties for one or more player sessions. This operation can be used in several ways: (1) provide a PlayerSessionId to request properties for a specific player session; (2) provide a GameSessionId to request properties for all player sessions in the specified game session; (3) provide a PlayerId to request properties for all player sessions of a specified player.

To get game session record(s), specify only one of the following: a player session ID, a game session ID, or a player ID. You can filter this request by player session status. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a PlayerSession object is returned for each session matching the request.

Available in Amazon GameLift Local.

", "DescribeRuntimeConfiguration": "

Retrieves a fleet's runtime configuration settings. The runtime configuration tells Amazon GameLift which server processes to run (and how) on each instance in the fleet.

To get a runtime configuration, specify the fleet's unique identifier. If successful, a RuntimeConfiguration object is returned for the requested fleet. If the requested fleet has been deleted, the result set is empty.

Learn more

Setting up GameLift Fleets

Running Multiple Processes on a Fleet

Related operations

", "DescribeScalingPolicies": "

Retrieves all scaling policies applied to a fleet.

To get a fleet's scaling policies, specify the fleet ID. You can filter this request by policy status, such as to retrieve only active scaling policies. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, set of ScalingPolicy objects is returned for the fleet.

A fleet may have all of its scaling policies suspended (StopFleetActions). This operation does not affect the status of the scaling policies, which remains ACTIVE. To see whether a fleet's scaling policies are in force or suspended, call DescribeFleetAttributes and check the stopped actions.

", @@ -71,11 +71,11 @@ "SearchGameSessions": "

Retrieves all active game sessions that match a set of search criteria and sorts them in a specified order. You can search or sort by the following game session attributes:

Returned values for playerSessionCount and hasAvailablePlayerSessions change quickly as players join sessions and others drop out. Results should be considered a snapshot in time. Be sure to refresh search results often, and handle sessions that fill up before a player can join.

To search or sort, specify either a fleet ID or an alias ID, and provide a search filter expression, a sort expression, or both. If successful, a collection of GameSession objects matching the request is returned. Use the pagination parameters to retrieve results as a set of sequential pages.

You can search for game sessions one fleet at a time only. To find game sessions across multiple fleets, you must search each fleet separately and combine the results. This search feature finds only game sessions that are in ACTIVE status. To locate games in statuses other than active, use DescribeGameSessionDetails.

", "StartFleetActions": "

Resumes activity on a fleet that was suspended with StopFleetActions. Currently, this operation is used to restart a fleet's auto-scaling activity.

To start fleet actions, specify the fleet ID and the type of actions to restart. When auto-scaling fleet actions are restarted, Amazon GameLift once again initiates scaling events as triggered by the fleet's scaling policies. If actions on the fleet were never stopped, this operation will have no effect. You can view a fleet's stopped actions using DescribeFleetAttributes.

Learn more

Setting up GameLift Fleets

Related operations

", "StartGameSessionPlacement": "

Places a request for a new game session in a queue (see CreateGameSessionQueue). When processing a placement request, Amazon GameLift searches for available resources on the queue's destinations, scanning each until it finds resources or the placement request times out.

A game session placement request can also request player sessions. When a new game session is successfully created, Amazon GameLift creates a player session for each player included in the request.

When placing a game session, by default Amazon GameLift tries each fleet in the order they are listed in the queue configuration. Ideally, a queue's destinations are listed in preference order.

Alternatively, when requesting a game session with players, you can also provide latency data for each player in relevant Regions. Latency data indicates the performance lag a player experiences when connected to a fleet in the Region. Amazon GameLift uses latency data to reorder the list of destinations to place the game session in a Region with minimal lag. If latency data is provided for multiple players, Amazon GameLift calculates each Region's average lag for all players and reorders to get the best game play across all players.

To place a new game session request, specify the following:

If successful, a new game session placement is created.

To track the status of a placement request, call DescribeGameSessionPlacement and check the request's status. If the status is FULFILLED, a new game session has been created and a game session ARN and Region are referenced. If the placement request times out, you can resubmit the request or retry it with a different queue.

", - "StartMatchBackfill": "

Finds new players to fill open slots in an existing game session. This operation can be used to add players to matched games that start with fewer than the maximum number of players or to replace players when they drop out. By backfilling with the same matchmaker used to create the original match, you ensure that new players meet the match criteria and maintain a consistent experience throughout the game session. You can backfill a match anytime after a game session has been created.

To request a match backfill, specify a unique ticket ID, the existing game session's ARN, a matchmaking configuration, and a set of data that describes all current players in the game session. If successful, a match backfill ticket is created and returned with status set to QUEUED. The ticket is placed in the matchmaker's ticket pool and processed. Track the status of the ticket to respond as needed.

The process of finding backfill matches is essentially identical to the initial matchmaking process. The matchmaker searches the pool and groups tickets together to form potential matches, allowing only one backfill ticket per potential match. Once the a match is formed, the matchmaker creates player sessions for the new players. All tickets in the match are updated with the game session's connection information, and the GameSession object is updated to include matchmaker data on the new players. For more detail on how match backfill requests are processed, see How Amazon GameLift FlexMatch Works.

Learn more

Backfill Existing Games with FlexMatch

How GameLift FlexMatch Works

Related operations

", - "StartMatchmaking": "

Uses FlexMatch to create a game match for a group of players based on custom matchmaking rules, and starts a new game for the matched players. Each matchmaking request specifies the type of match to build (team configuration, rules for an acceptable match, etc.). The request also specifies the players to find a match for and where to host the new game session for optimal performance. A matchmaking request might start with a single player or a group of players who want to play together. FlexMatch finds additional players as needed to fill the match. Match type, rules, and the queue used to place a new game session are defined in a MatchmakingConfiguration.

To start matchmaking, provide a unique ticket ID, specify a matchmaking configuration, and include the players to be matched. You must also include a set of player attributes relevant for the matchmaking configuration. If successful, a matchmaking ticket is returned with status set to QUEUED.

Track the status of the ticket to respond as needed and acquire game session connection information for successfully completed matches. Ticket status updates are tracked using event notification through Amazon Simple Notification Service (SNS), which is defined in the matchmaking configuration.

Processing a matchmaking request -- FlexMatch handles a matchmaking request as follows:

  1. Your client code submits a StartMatchmaking request for one or more players and tracks the status of the request ticket.

  2. FlexMatch uses this ticket and others in process to build an acceptable match. When a potential match is identified, all tickets in the proposed match are advanced to the next status.

  3. If the match requires player acceptance (set in the matchmaking configuration), the tickets move into status REQUIRES_ACCEPTANCE. This status triggers your client code to solicit acceptance from all players in every ticket involved in the match, and then call AcceptMatch for each player. If any player rejects or fails to accept the match before a specified timeout, the proposed match is dropped (see AcceptMatch for more details).

  4. Once a match is proposed and accepted, the matchmaking tickets move into status PLACING. FlexMatch locates resources for a new game session using the game session queue (set in the matchmaking configuration) and creates the game session based on the match data.

  5. When the match is successfully placed, the matchmaking tickets move into COMPLETED status. Connection information (including game session endpoint and player session) is added to the matchmaking tickets. Matched players can use the connection information to join the game.

Learn more

Add FlexMatch to a Game Client

Set Up FlexMatch Event Notification

FlexMatch Integration Roadmap

How GameLift FlexMatch Works

Related operations

", + "StartMatchBackfill": "

Finds new players to fill open slots in an existing game session. This operation can be used to add players to matched games that start with fewer than the maximum number of players or to replace players when they drop out. By backfilling with the same matchmaker used to create the original match, you ensure that new players meet the match criteria and maintain a consistent experience throughout the game session. You can backfill a match anytime after a game session has been created.

To request a match backfill, specify a unique ticket ID, the existing game session's ARN, a matchmaking configuration, and a set of data that describes all current players in the game session. If successful, a match backfill ticket is created and returned with status set to QUEUED. The ticket is placed in the matchmaker's ticket pool and processed. Track the status of the ticket to respond as needed.

The process of finding backfill matches is essentially identical to the initial matchmaking process. The matchmaker searches the pool and groups tickets together to form potential matches, allowing only one backfill ticket per potential match. Once a match is formed, the matchmaker creates player sessions for the new players. All tickets in the match are updated with the game session's connection information, and the GameSession object is updated to include matchmaker data on the new players. For more detail on how match backfill requests are processed, see How Amazon GameLift FlexMatch Works.

Learn more

Backfill Existing Games with FlexMatch

How GameLift FlexMatch Works

Related operations

", + "StartMatchmaking": "

Uses FlexMatch to create a game match for a group of players based on custom matchmaking rules. If you're also using GameLift hosting, a new game session is started for the matched players. Each matchmaking request identifies one or more players to find a match for, and specifies the type of match to build, including the team configuration and the rules for an acceptable match. When a matchmaking request identifies a group of players who want to play together, FlexMatch finds additional players to fill the match. Match type, rules, and other features are defined in a MatchmakingConfiguration.

To start matchmaking, provide a unique ticket ID, specify a matchmaking configuration, and include the players to be matched. For each player, you must also include the player attribute values that are required by the matchmaking configuration (in the rule set). If successful, a matchmaking ticket is returned with status set to QUEUED.

Track the status of the ticket to respond as needed. If you're also using GameLift hosting, a successfully completed ticket contains game session connection information. Ticket status updates are tracked using event notification through Amazon Simple Notification Service (SNS), which is defined in the matchmaking configuration.

Learn more

Add FlexMatch to a Game Client

Set Up FlexMatch Event Notification

FlexMatch Integration Roadmap

How GameLift FlexMatch Works

Related operations

", "StopFleetActions": "

Suspends activity on a fleet. Currently, this operation is used to stop a fleet's auto-scaling activity. It is used to temporarily stop triggering scaling events. The policies can be retained and auto-scaling activity can be restarted using StartFleetActions. You can view a fleet's stopped actions using DescribeFleetAttributes.

To stop fleet actions, specify the fleet ID and the type of actions to suspend. When auto-scaling fleet actions are stopped, Amazon GameLift no longer initiates scaling events except in response to manual changes using UpdateFleetCapacity.

Learn more

Setting up GameLift Fleets

Related operations

", "StopGameSessionPlacement": "

Cancels a game session placement that is in PENDING status. To stop a placement, provide the placement ID values. If successful, the placement is moved to CANCELLED status.

", - "StopMatchmaking": "

Cancels a matchmaking ticket or match backfill ticket that is currently being processed. To stop the matchmaking operation, specify the ticket ID. If successful, work on the ticket is stopped, and the ticket status is changed to CANCELLED.

This call is also used to turn off automatic backfill for an individual game session. This is for game sessions that are created with a matchmaking configuration that has automatic backfill enabled. The ticket ID is included in the MatchmakerData of an updated game session object, which is provided to the game server.

If the operation is successful, the service sends back an empty JSON struct with the HTTP 200 response (not an empty HTTP body).

Learn more

Add FlexMatch to a Game Client

Related operations

", + "StopMatchmaking": "

Cancels a matchmaking ticket or match backfill ticket that is currently being processed. To stop the matchmaking operation, specify the ticket ID. If successful, work on the ticket is stopped, and the ticket status is changed to CANCELLED.

This call is also used to turn off automatic backfill for an individual game session. This is for game sessions that are created with a matchmaking configuration that has automatic backfill enabled. The ticket ID is included in the MatchmakerData of an updated game session object, which is provided to the game server.

If the operation is successful, the service sends back an empty JSON struct with the HTTP 200 response (not an empty HTTP body).

Learn more

Add FlexMatch to a Game Client

Related operations

", "SuspendGameServerGroup": "

This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

Temporarily stops activity on a game server group without terminating instances or the game server group. You can restart activity by calling ResumeGameServerGroup. You can suspend the following activity:

To suspend activity, specify a game server group ARN and the type of activity to be suspended. If successful, a GameServerGroup object is returned showing that the activity is listed in SuspendedActions.

Learn more

GameLift FleetIQ Guide

Related operations

", "TagResource": "

Assigns a tag to a GameLift resource. AWS resource tags provide an additional management tool set. You can use tags to organize resources, create IAM permissions policies to manage access to groups of resources, customize AWS cost breakdowns, etc. This operation handles the permissions necessary to manage tags for the following GameLift resource types:

To add a tag to a resource, specify the unique ARN value for the resource and provide a tag list containing one or more tags. The operation succeeds even if the list includes tags that are already assigned to the specified resource.

Learn more

Tagging AWS Resources in the AWS General Reference

AWS Tagging Strategies

Related operations

", "UntagResource": "

Removes a tag that is assigned to a GameLift resource. Resource tags are used to organize AWS resources for a range of purposes. This operation handles the permissions necessary to manage tags for the following GameLift resource types:

To remove a tag from a resource, specify the unique ARN value for the resource and provide a string list containing one or more tags to be removed. This operation succeeds even if the list includes tags that are not currently assigned to the specified resource.

Learn more

Tagging AWS Resources in the AWS General Reference

AWS Tagging Strategies

Related operations

", @@ -88,10 +88,10 @@ "UpdateGameServerGroup": "

This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

Updates GameLift FleetIQ-specific properties for a game server group. Many Auto Scaling group properties are updated on the Auto Scaling group directly, including the launch template, Auto Scaling policies, and maximum/minimum/desired instance counts.

To update the game server group, specify the game server group ID and provide the updated values. Before applying the updates, the new values are validated to ensure that GameLift FleetIQ can continue to perform instance balancing activity. If successful, a GameServerGroup object is returned.

Learn more

GameLift FleetIQ Guide

Related operations

", "UpdateGameSession": "

Updates game session properties. This includes the session name, maximum player count, protection policy, which controls whether or not an active game session can be terminated during a scale-down event, and the player session creation policy, which controls whether or not new players can join the session. To update a game session, specify the game session ID and the values you want to change. If successful, an updated GameSession object is returned.

", "UpdateGameSessionQueue": "

Updates settings for a game session queue, which determines how new game session requests in the queue are processed. To update settings, specify the queue name to be updated and provide the new settings. When updating destinations, provide a complete list of destinations.

Learn more

Using Multi-Region Queues

Related operations

", - "UpdateMatchmakingConfiguration": "

Updates settings for a FlexMatch matchmaking configuration. These changes affect all matches and game sessions that are created after the update. To update settings, specify the configuration name to be updated and provide the new settings.

Learn more

Design a FlexMatch Matchmaker

Related operations

", + "UpdateMatchmakingConfiguration": "

Updates settings for a FlexMatch matchmaking configuration. These changes affect all matches and game sessions that are created after the update. To update settings, specify the configuration name to be updated and provide the new settings.

Learn more

Design a FlexMatch Matchmaker

Related operations

", "UpdateRuntimeConfiguration": "

Updates the current runtime configuration for the specified fleet, which tells Amazon GameLift how to launch server processes on instances in the fleet. You can update a fleet's runtime configuration at any time after the fleet is created; it does not need to be in an ACTIVE status.

To update runtime configuration, specify the fleet ID and provide a RuntimeConfiguration object with an updated set of server process configurations.

Each instance in a Amazon GameLift fleet checks regularly for an updated runtime configuration and changes how it launches server processes to comply with the latest version. Existing server processes are not affected by the update; runtime configuration changes are applied gradually as existing processes shut down and new processes are launched during Amazon GameLift's normal process recycling activity.

Learn more

Setting up GameLift Fleets

Related operations

", "UpdateScript": "

Updates Realtime script metadata and content.

To update script metadata, specify the script ID and provide updated name and/or version values.

To update script content, provide an updated zip file by pointing to either a local file or an Amazon S3 bucket location. You can use either method regardless of how the original script was uploaded. Use the Version parameter to track updates to the script.

If the call is successful, the updated metadata is stored in the script record and a revised script is uploaded to the Amazon GameLift service. Once the script is updated and acquired by a fleet instance, the new version is used for all new game sessions.

Learn more

Amazon GameLift Realtime Servers

Related operations

", - "ValidateMatchmakingRuleSet": "

Validates the syntax of a matchmaking rule or rule set. This operation checks that the rule set is using syntactically correct JSON and that it conforms to allowed property expressions. To validate syntax, provide a rule set JSON string.

Learn more

Related operations

" + "ValidateMatchmakingRuleSet": "

Validates the syntax of a matchmaking rule or rule set. This operation checks that the rule set is using syntactically correct JSON and that it conforms to allowed property expressions. To validate syntax, provide a rule set JSON string.

Learn more

Related operations

" }, "shapes": { "AcceptMatchInput": { @@ -196,9 +196,9 @@ "BackfillMode": { "base": null, "refs": { - "CreateMatchmakingConfigurationInput$BackfillMode": "

The method used to backfill game sessions that are created with this matchmaking configuration. Specify MANUAL when your game manages backfill requests manually or does not use the match backfill feature. Specify AUTOMATIC to have GameLift create a StartMatchBackfill request whenever a game session has one or more open slots. Learn more about manual and automatic backfill in Backfill Existing Games with FlexMatch.

", - "MatchmakingConfiguration$BackfillMode": "

The method used to backfill game sessions created with this matchmaking configuration. MANUAL indicates that the game makes backfill requests or does not use the match backfill feature. AUTOMATIC indicates that GameLift creates StartMatchBackfill requests whenever a game session has one or more open slots. Learn more about manual and automatic backfill in Backfill Existing Games with FlexMatch.

", - "UpdateMatchmakingConfigurationInput$BackfillMode": "

The method that is used to backfill game sessions created with this matchmaking configuration. Specify MANUAL when your game manages backfill requests manually or does not use the match backfill feature. Specify AUTOMATIC to have GameLift create a StartMatchBackfill request whenever a game session has one or more open slots. Learn more about manual and automatic backfill in Backfill Existing Games with FlexMatch.

" + "CreateMatchmakingConfigurationInput$BackfillMode": "

The method used to backfill game sessions that are created with this matchmaking configuration. Specify MANUAL when your game manages backfill requests manually or does not use the match backfill feature. Specify AUTOMATIC to have GameLift create a StartMatchBackfill request whenever a game session has one or more open slots. Learn more about manual and automatic backfill in Backfill Existing Games with FlexMatch. Automatic backfill is not available when FlexMatchMode is set to STANDALONE.

", + "MatchmakingConfiguration$BackfillMode": "

The method used to backfill game sessions created with this matchmaking configuration. MANUAL indicates that the game makes backfill requests or does not use the match backfill feature. AUTOMATIC indicates that GameLift creates StartMatchBackfill requests whenever a game session has one or more open slots. Learn more about manual and automatic backfill in Backfill Existing Games with FlexMatch. Automatic backfill is not available when FlexMatchMode is set to STANDALONE.

", + "UpdateMatchmakingConfigurationInput$BackfillMode": "

The method that is used to backfill game sessions created with this matchmaking configuration. Specify MANUAL when your game manages backfill requests manually or does not use the match backfill feature. Specify AUTOMATIC to have GameLift create a StartMatchBackfill request whenever a game session has one or more open slots. Learn more about manual and automatic backfill in Backfill Existing Games with FlexMatch. Automatic backfill is not available when FlexMatchMode is set to STANDALONE.

" } }, "BalancingStrategy": { @@ -212,9 +212,9 @@ "BooleanModel": { "base": null, "refs": { - "CreateMatchmakingConfigurationInput$AcceptanceRequired": "

A flag that determines whether a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE.

", - "MatchmakingConfiguration$AcceptanceRequired": "

A flag that indicates whether a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE.

", - "UpdateMatchmakingConfigurationInput$AcceptanceRequired": "

A flag that indicates whether a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE.

", + "CreateMatchmakingConfigurationInput$AcceptanceRequired": "

A flag that determines whether a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE. With this option enabled, matchmaking tickets use the status REQUIRES_ACCEPTANCE to indicate when a completed potential match is waiting for player acceptance.

", + "MatchmakingConfiguration$AcceptanceRequired": "

A flag that indicates whether a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE. When this option is enabled, matchmaking tickets use the status REQUIRES_ACCEPTANCE to indicate when a completed potential match is waiting for player acceptance.

", + "UpdateMatchmakingConfigurationInput$AcceptanceRequired": "

A flag that indicates whether a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE. With this option enabled, matchmaking tickets use the status REQUIRES_ACCEPTANCE to indicate when a completed potential match is waiting for player acceptance.

", "ValidateMatchmakingRuleSetOutput$Valid": "

A response indicating whether the rule set is valid.

" } }, @@ -1003,6 +1003,14 @@ "DescribeFleetUtilizationOutput$FleetUtilization": "

A collection of objects containing utilization information for each requested fleet ID.

" } }, + "FlexMatchMode": { + "base": null, + "refs": { + "CreateMatchmakingConfigurationInput$FlexMatchMode": "

Indicates whether this matchmaking configuration is being used with GameLift hosting or as a standalone matchmaking solution.

", + "MatchmakingConfiguration$FlexMatchMode": "

Indicates whether this matchmaking configuration is being used with GameLift hosting or as a standalone matchmaking solution.

", + "UpdateMatchmakingConfigurationInput$FlexMatchMode": "

Indicates whether this matchmaking configuration is being used with GameLift hosting or as a standalone matchmaking solution.

" + } + }, "Float": { "base": null, "refs": { @@ -1034,12 +1042,12 @@ "base": null, "refs": { "CreateGameSessionInput$GameProperties": "

Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

", - "CreateMatchmakingConfigurationInput$GameProperties": "

A set of custom properties for a game session, formatted as key-value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

", + "CreateMatchmakingConfigurationInput$GameProperties": "

A set of custom properties for a game session, formatted as key-value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match. This parameter is not used if FlexMatchMode is set to STANDALONE.

", "GameSession$GameProperties": "

Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). You can search for active game sessions based on this custom data with SearchGameSessions.

", "GameSessionPlacement$GameProperties": "

Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

", - "MatchmakingConfiguration$GameProperties": "

A set of custom properties for a game session, formatted as key-value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

", + "MatchmakingConfiguration$GameProperties": "

A set of custom properties for a game session, formatted as key-value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match. This parameter is not used when FlexMatchMode is set to STANDALONE.

", "StartGameSessionPlacementInput$GameProperties": "

Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

", - "UpdateMatchmakingConfigurationInput$GameProperties": "

A set of custom properties for a game session, formatted as key-value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" + "UpdateMatchmakingConfigurationInput$GameProperties": "

A set of custom properties for a game session, formatted as key-value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match. This parameter is not used if FlexMatchMode is set to STANDALONE.

" } }, "GamePropertyValue": { @@ -1123,7 +1131,7 @@ "GameServerGroupDeleteOption": { "base": null, "refs": { - "DeleteGameServerGroupInput$DeleteOption": "

The type of delete to perform. Options include the following:

" + "DeleteGameServerGroupInput$DeleteOption": "

The type of delete to perform. Options include the following:

" } }, "GameServerGroupInstanceType": { @@ -1257,21 +1265,21 @@ } }, "GameSessionConnectionInfo": { - "base": "

Connection information for the new game session that is created with matchmaking. (with StartMatchmaking). Once a match is set, the FlexMatch engine places the match and creates a new game session for it. This information, including the game session endpoint and player sessions for each player in the original matchmaking request, is added to the MatchmakingTicket, which can be retrieved by calling DescribeMatchmaking.

", + "base": "

Connection information for a new game session that is created in response to a StartMatchmaking request. Once a match is made, the FlexMatch engine creates a new game session for it. This information, including the game session endpoint and player sessions for each player in the original matchmaking request, is added to the MatchmakingTicket, which can be retrieved by calling DescribeMatchmaking.

", "refs": { - "MatchmakingTicket$GameSessionConnectionInfo": "

Identifier and connection information of the game session created for the match. This information is added to the ticket only after the matchmaking request has been successfully completed.

" + "MatchmakingTicket$GameSessionConnectionInfo": "

Identifier and connection information of the game session created for the match. This information is added to the ticket only after the matchmaking request has been successfully completed. This parameter is not set when FlexMatch is being used without GameLift hosting.

" } }, "GameSessionData": { "base": null, "refs": { "CreateGameSessionInput$GameSessionData": "

Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

", - "CreateMatchmakingConfigurationInput$GameSessionData": "

A set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

", + "CreateMatchmakingConfigurationInput$GameSessionData": "

A set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match. This parameter is not used if FlexMatchMode is set to STANDALONE.

", "GameSession$GameSessionData": "

Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

", "GameSessionPlacement$GameSessionData": "

Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

", - "MatchmakingConfiguration$GameSessionData": "

A set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

", + "MatchmakingConfiguration$GameSessionData": "

A set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match. This parameter is not used when FlexMatchMode is set to STANDALONE.

", "StartGameSessionPlacementInput$GameSessionData": "

Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).

", - "UpdateMatchmakingConfigurationInput$GameSessionData": "

A set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" + "UpdateMatchmakingConfigurationInput$GameSessionData": "

A set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match. This parameter is not used if FlexMatchMode is set to STANDALONE.

" } }, "GameSessionDetail": { @@ -1656,16 +1664,16 @@ "MatchmakerData": { "base": null, "refs": { - "GameSession$MatchmakerData": "

Information about the matchmaking process that was used to create the game session. It is in JSON syntax, formatted as a string. In addition the matchmaking configuration used, it contains data on all players assigned to the match, including player attributes and team assignments. For more details on matchmaker data, see Match Data. Matchmaker data is useful when requesting match backfills, and is updated whenever new players are added during a successful backfill (see StartMatchBackfill).

", - "GameSessionPlacement$MatchmakerData": "

Information on the matchmaking process for this game. Data is in JSON syntax, formatted as a string. It identifies the matchmaking configuration used to create the match, and contains data on all players assigned to the match, including player attributes and team assignments. For more details on matchmaker data, see Match Data.

" + "GameSession$MatchmakerData": "

Information about the matchmaking process that was used to create the game session. It is in JSON syntax, formatted as a string. In addition to the matchmaking configuration used, it contains data on all players assigned to the match, including player attributes and team assignments. For more details on matchmaker data, see Match Data. Matchmaker data is useful when requesting match backfills, and is updated whenever new players are added during a successful backfill (see StartMatchBackfill).

", + "GameSessionPlacement$MatchmakerData": "

Information on the matchmaking process for this game. Data is in JSON syntax, formatted as a string. It identifies the matchmaking configuration used to create the match, and contains data on all players assigned to the match, including player attributes and team assignments. For more details on matchmaker data, see Match Data.

" } }, "MatchmakingAcceptanceTimeoutInteger": { "base": null, "refs": { - "CreateMatchmakingConfigurationInput$AcceptanceTimeoutSeconds": "

The length of time (in seconds) to wait for players to accept a proposed match. If any player rejects the match or fails to accept before the timeout, the ticket continues to look for an acceptable match.

", - "MatchmakingConfiguration$AcceptanceTimeoutSeconds": "

The length of time (in seconds) to wait for players to accept a proposed match. If any player rejects the match or fails to accept before the timeout, the ticket continues to look for an acceptable match.

", - "UpdateMatchmakingConfigurationInput$AcceptanceTimeoutSeconds": "

The length of time (in seconds) to wait for players to accept a proposed match. If any player rejects the match or fails to accept before the timeout, the ticket continues to look for an acceptable match.

" + "CreateMatchmakingConfigurationInput$AcceptanceTimeoutSeconds": "

The length of time (in seconds) to wait for players to accept a proposed match, if acceptance is required. If any player rejects the match or fails to accept before the timeout, the tickets are returned to the ticket pool and continue to be evaluated for an acceptable match.

", + "MatchmakingConfiguration$AcceptanceTimeoutSeconds": "

The length of time (in seconds) to wait for players to accept a proposed match, if acceptance is required. If any player rejects the match or fails to accept before the timeout, the tickets are returned to the ticket pool and continue to be evaluated for an acceptable match.

", + "UpdateMatchmakingConfigurationInput$AcceptanceTimeoutSeconds": "

The length of time (in seconds) to wait for players to accept a proposed match, if acceptance is required. If any player rejects the match or fails to accept before the timeout, the tickets are returned to the ticket pool and continue to be evaluated for an acceptable match.

" } }, "MatchmakingConfiguration": { @@ -1679,7 +1687,7 @@ "MatchmakingConfigurationArn": { "base": "Data type used for Matchmaking Configuration ARN.", "refs": { - "MatchmakingConfiguration$ConfigurationArn": "

Amazon Resource Name (ARN) that is assigned to a GameLift matchmaking configuration resource and uniquely identifies it. ARNs are unique across all Regions. In a GameLift configuration ARN, the resource ID matches the Name value.

", + "MatchmakingConfiguration$ConfigurationArn": "

Amazon Resource Name (ARN) that is assigned to a GameLift matchmaking configuration resource and uniquely identifies it. ARNs are unique across all Regions. In a GameLift configuration ARN, the resource ID matches the Name value.

", "MatchmakingTicket$ConfigurationArn": "

The Amazon Resource Name (ARN) associated with the GameLift matchmaking configuration resource that is used with this ticket.

" } }, @@ -1743,7 +1751,7 @@ } }, "MatchmakingRuleSet": { - "base": "

Set of rule statements, used with FlexMatch, that determine how to build your player matches. Each rule set describes a type of group to be created and defines the parameters for acceptable player matches. Rule sets are used in MatchmakingConfiguration objects.

A rule set may define the following elements for a match. For detailed information and examples showing how to construct a rule set, see Build a FlexMatch Rule Set.

", + "base": "

Set of rule statements, used with FlexMatch, that determine how to build your player matches. Each rule set describes a type of group to be created and defines the parameters for acceptable player matches. Rule sets are used in MatchmakingConfiguration objects.

A rule set may define the following elements for a match. For detailed information and examples showing how to construct a rule set, see Build a FlexMatch Rule Set.

", "refs": { "CreateMatchmakingRuleSetOutput$RuleSet": "

The newly created matchmaking rule set.

", "MatchmakingRuleSetList$member": null @@ -1840,9 +1848,9 @@ "AwsCredentials$SecretAccessKey": "

Temporary secret key allowing access to the Amazon GameLift S3 account.

", "AwsCredentials$SessionToken": "

Token used to associate a specific build ID with the files uploaded using these credentials.

", "ConflictException$Message": null, - "CreateFleetInput$InstanceRoleArn": "

A unique identifier for an AWS IAM role that manages access to your AWS services. With an instance role ARN set, any application that runs on an instance in this fleet can assume the role, including install scripts, server processes, and daemons (background processes). Create a role or look up a role's ARN from the IAM dashboard in the AWS Management Console. Learn more about using on-box credentials for your game servers at Access external resources from a game server.

", + "CreateFleetInput$InstanceRoleArn": "

A unique identifier for an AWS IAM role that manages access to your AWS services. Fleets with an instance role ARN allow applications that are running on the fleet's instances to assume the role. Learn more about using on-box credentials for your game servers at Access external resources from a game server. To call this operation with instance role ARN, you must have IAM PassRole permissions. See IAM policy examples for GameLift.

", "Event$Message": "

Additional information related to the event.

", - "FleetAttributes$InstanceRoleArn": "

A unique identifier for an AWS IAM role that manages access to your AWS services. With an instance role ARN set, any application that runs on an instance in this fleet can assume the role, including install scripts, server processes, and daemons (background processes). Create a role or look up a role's ARN from the IAM dashboard in the AWS Management Console. Learn more about using on-box credentials for your game servers at Access external resources from a game server.

", + "FleetAttributes$InstanceRoleArn": "

A unique identifier for an AWS IAM role that manages access to your AWS services.

", "FleetCapacityExceededException$Message": null, "GameSessionFullException$Message": null, "IdempotentParameterMismatchException$Message": null, @@ -2101,7 +2109,7 @@ "base": null, "refs": { "MatchmakingTicket$Players": "

A set of Player objects, each representing a player to find matches for. Players are identified by a unique player ID and may include latency data for use during matchmaking. If the ticket is in status COMPLETED, the Player objects include the team the players were assigned to in the resulting match.

", - "StartMatchBackfillInput$Players": "

Match information on all players that are currently assigned to the game session. This information is used by the matchmaker to find new players and add them to the existing game.

", + "StartMatchBackfillInput$Players": "

Match information on all players that are currently assigned to the game session. This information is used by the matchmaker to find new players and add them to the existing game.

", "StartMatchmakingInput$Players": "

Information on each player to be matched. This information must include a player ID, and may contain player attributes and latency data to be used in the matchmaking process. After a successful match, Player objects contain the name of the team the player is assigned to.

" } }, @@ -2219,9 +2227,9 @@ "QueueArnsList": { "base": null, "refs": { - "CreateMatchmakingConfigurationInput$GameSessionQueueArns": "

Amazon Resource Name (ARN) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. These queues are used when placing game sessions for matches that are created with this matchmaking configuration. Queues can be located in any Region.

", - "MatchmakingConfiguration$GameSessionQueueArns": "

Amazon Resource Name (ARN) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. GameLift uses the listed queues when placing game sessions for matches that are created with this matchmaking configuration. Queues can be located in any Region.

", - "UpdateMatchmakingConfigurationInput$GameSessionQueueArns": "

Amazon Resource Name (ARN) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. These queues are used when placing game sessions for matches that are created with this matchmaking configuration. Queues can be located in any Region.

" + "CreateMatchmakingConfigurationInput$GameSessionQueueArns": "

Amazon Resource Name (ARN) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. Queues can be located in any Region. Queues are used to start new GameLift-hosted game sessions for matches that are created with this matchmaking configuration. If FlexMatchMode is set to STANDALONE, do not set this parameter.

", + "MatchmakingConfiguration$GameSessionQueueArns": "

Amazon Resource Name (ARN) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. Queues can be located in any Region. Queues are used to start new GameLift-hosted game sessions for matches that are created with this matchmaking configuration. This property is not set when FlexMatchMode is set to STANDALONE.

", + "UpdateMatchmakingConfigurationInput$GameSessionQueueArns": "

Amazon Resource Name (ARN) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. Queues can be located in any Region. Queues are used to start new GameLift-hosted game sessions for matches that are created with this matchmaking configuration. If FlexMatchMode is set to STANDALONE, do not set this parameter.

" } }, "RegisterGameServerInput": { @@ -2313,12 +2321,12 @@ "S3Location": { "base": "

The location in S3 where build or script files are stored for access by Amazon GameLift. This location is specified in CreateBuild, CreateScript, and UpdateScript requests.

", "refs": { - "CreateBuildInput$StorageLocation": "

Information indicating where your game build files are stored. Use this parameter only when creating a build with files stored in an S3 bucket that you own. The storage location must specify an S3 bucket name and key. The location must also specify a role ARN that you set up to allow Amazon GameLift to access your S3 bucket. The S3 bucket and your new build must be in the same Region.

", + "CreateBuildInput$StorageLocation": "

The location where your game build files are stored. Use this parameter only when creating a build using files that are stored in an S3 bucket that you own. Identify an S3 bucket name and key, which must be in the same Region where you're creating a build. This parameter must also specify the ARN for an IAM role that you've set up to give Amazon GameLift access to your S3 bucket. To call this operation with a storage location, you must have IAM PassRole permission. For more details on IAM roles and PassRole permissions, see Set up a role for GameLift access.

", "CreateBuildOutput$StorageLocation": "

Amazon S3 location for your game build file, including bucket name and key.

", - "CreateScriptInput$StorageLocation": "

The location of the Amazon S3 bucket where a zipped file containing your Realtime scripts is stored. The storage location must specify the Amazon S3 bucket name, the zip file name (the \"key\"), and a role ARN that allows Amazon GameLift to access the Amazon S3 storage location. The S3 bucket must be in the same Region where you want to create a new script. By default, Amazon GameLift uploads the latest version of the zip file; if you have S3 object versioning turned on, you can use the ObjectVersion parameter to specify an earlier version.

", + "CreateScriptInput$StorageLocation": "

The Amazon S3 location of your Realtime scripts. The storage location must specify the S3 bucket name, the zip file name (the \"key\"), and an IAM role ARN that allows Amazon GameLift to access the S3 storage location. The S3 bucket must be in the same Region where you are creating a new script. By default, Amazon GameLift uploads the latest version of the zip file; if you have S3 object versioning turned on, you can use the ObjectVersion parameter to specify an earlier version. To call this operation with a storage location, you must have IAM PassRole permission. For more details on IAM roles and PassRole permissions, see Set up a role for GameLift access.

", "RequestUploadCredentialsOutput$StorageLocation": "

Amazon S3 path and key, identifying where the game build files are stored.

", "Script$StorageLocation": null, - "UpdateScriptInput$StorageLocation": "

The location of the Amazon S3 bucket where a zipped file containing your Realtime scripts is stored. The storage location must specify the Amazon S3 bucket name, the zip file name (the \"key\"), and a role ARN that allows Amazon GameLift to access the Amazon S3 storage location. The S3 bucket must be in the same Region where you want to create a new script. By default, Amazon GameLift uploads the latest version of the zip file; if you have S3 object versioning turned on, you can use the ObjectVersion parameter to specify an earlier version.

" + "UpdateScriptInput$StorageLocation": "

The Amazon S3 location of your Realtime scripts. The storage location must specify the S3 bucket name, the zip file name (the \"key\"), and an IAM role ARN that allows Amazon GameLift to access the S3 storage location. The S3 bucket must be in the same Region as the script you're updating. By default, Amazon GameLift uploads the latest version of the zip file; if you have S3 object versioning turned on, you can use the ObjectVersion parameter to specify an earlier version. To call this operation with a storage location, you must have IAM PassRole permission. For more details on IAM roles and PassRole permissions, see Set up a role for GameLift access.

" } }, "ScalingAdjustmentType": { @@ -2413,7 +2421,7 @@ "refs": { "CreateMatchmakingConfigurationInput$NotificationTarget": "

An SNS topic ARN that is set up to receive matchmaking notifications.

", "MatchmakingConfiguration$NotificationTarget": "

An SNS topic ARN that is set up to receive matchmaking notifications.

", - "UpdateMatchmakingConfigurationInput$NotificationTarget": "

An SNS topic ARN that is set up to receive matchmaking notifications. See Setting up Notifications for Matchmaking for more information.

" + "UpdateMatchmakingConfigurationInput$NotificationTarget": "

An SNS topic ARN that is set up to receive matchmaking notifications. See Setting up Notifications for Matchmaking for more information.

" } }, "SortOrder": { @@ -2820,7 +2828,7 @@ "VpcSubnets": { "base": null, "refs": { - "CreateGameServerGroupInput$VpcSubnets": "

A list of virtual private cloud (VPC) subnets to use with instances in the game server group. By default, all GameLift FleetIQ-supported Availability Zones are used. You can use this parameter to specify VPCs that you've set up. This property cannot be updated after the game server group is created, and the corresponding Auto Scaling group will always use the property value that is set with this request, even if the Auto Scaling group is updated directly

" + "CreateGameServerGroupInput$VpcSubnets": "

A list of virtual private cloud (VPC) subnets to use with instances in the game server group. By default, all GameLift FleetIQ-supported Availability Zones are used. You can use this parameter to specify VPCs that you've set up. This property cannot be updated after the game server group is created, and the corresponding Auto Scaling group will always use the property value that is set with this request, even if the Auto Scaling group is updated directly.

" } }, "WeightedCapacity": { @@ -2835,7 +2843,7 @@ "CreateGameServerGroupInput$MinSize": "

The minimum number of instances allowed in the EC2 Auto Scaling group. During automatic scaling events, GameLift FleetIQ and EC2 do not scale down the group below this minimum. In production, this value should be set to at least 1. After the Auto Scaling group is created, update this value directly in the Auto Scaling group using the AWS console or APIs.

", "CreateGameSessionInput$MaximumPlayerSessionCount": "

The maximum number of players that can be connected simultaneously to the game session.

", "CreateGameSessionQueueInput$TimeoutInSeconds": "

The maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a TIMED_OUT status.

", - "CreateMatchmakingConfigurationInput$AdditionalPlayerCount": "

The number of player slots in a match to keep open for future players. For example, assume that the configuration's rule set specifies a match for a single 12-person team. If the additional player count is set to 2, only 10 players are initially selected for the match.

", + "CreateMatchmakingConfigurationInput$AdditionalPlayerCount": "

The number of player slots in a match to keep open for future players. For example, assume that the configuration's rule set specifies a match for a single 12-person team. If the additional player count is set to 2, only 10 players are initially selected for the match. This parameter is not used if FlexMatchMode is set to STANDALONE.

", "EC2InstanceCounts$DESIRED": "

Ideal number of active instances in the fleet.

", "EC2InstanceCounts$MINIMUM": "

The minimum value allowed for the fleet's instance count.

", "EC2InstanceCounts$MAXIMUM": "

The maximum value allowed for the fleet's instance count.

", @@ -2853,7 +2861,7 @@ "GameSession$MaximumPlayerSessionCount": "

The maximum number of players that can be connected simultaneously to the game session.

", "GameSessionPlacement$MaximumPlayerSessionCount": "

The maximum number of players that can be connected simultaneously to the game session.

", "GameSessionQueue$TimeoutInSeconds": "

The maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a TIMED_OUT status.

", - "MatchmakingConfiguration$AdditionalPlayerCount": "

The number of player slots in a match to keep open for future players. For example, assume that the configuration's rule set specifies a match for a single 12-person team. If the additional player count is set to 2, only 10 players are initially selected for the match.

", + "MatchmakingConfiguration$AdditionalPlayerCount": "

The number of player slots in a match to keep open for future players. For example, assume that the configuration's rule set specifies a match for a single 12-person team. If the additional player count is set to 2, only 10 players are initially selected for the match. This parameter is not used when FlexMatchMode is set to STANDALONE.

", "MatchmakingTicket$EstimatedWaitTime": "

Average amount of time (in seconds) that players are currently waiting for a match. If there is not enough recent data, this property may be empty.

", "PlayerLatencyPolicy$MaximumIndividualPlayerLatencyMilliseconds": "

The maximum latency value that is allowed for any player, in milliseconds. All policies must have a value set for this property.

", "PlayerLatencyPolicy$PolicyDurationSeconds": "

The length of time, in seconds, that the policy is enforced while placing a new game session. A null value for this property means that the policy is enforced until the queue times out.

", @@ -2865,7 +2873,7 @@ "UpdateFleetCapacityInput$MaxSize": "

The maximum value allowed for the fleet's instance count. Default if not set is 1.

", "UpdateGameSessionInput$MaximumPlayerSessionCount": "

The maximum number of players that can be connected simultaneously to the game session.

", "UpdateGameSessionQueueInput$TimeoutInSeconds": "

The maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a TIMED_OUT status.

", - "UpdateMatchmakingConfigurationInput$AdditionalPlayerCount": "

The number of player slots in a match to keep open for future players. For example, assume that the configuration's rule set specifies a match for a single 12-person team. If the additional player count is set to 2, only 10 players are initially selected for the match.

" + "UpdateMatchmakingConfigurationInput$AdditionalPlayerCount": "

The number of player slots in a match to keep open for future players. For example, assume that the configuration's rule set specifies a match for a single 12-person team. If the additional player count is set to 2, only 10 players are initially selected for the match. This parameter is not used if FlexMatchMode is set to STANDALONE.

" } }, "ZipBlob": { diff --git a/models/apis/iotsitewise/2019-12-02/api-2.json b/models/apis/iotsitewise/2019-12-02/api-2.json index a5abf8eb8ee..dbfcaee2407 100644 --- a/models/apis/iotsitewise/2019-12-02/api-2.json +++ b/models/apis/iotsitewise/2019-12-02/api-2.json @@ -431,6 +431,20 @@ ], "endpoint":{"hostPrefix":"monitor."} }, + "DescribeDefaultEncryptionConfiguration":{ + "name":"DescribeDefaultEncryptionConfiguration", + "http":{ + "method":"GET", + "requestUri":"/configuration/account/encryption" + }, + "input":{"shape":"DescribeDefaultEncryptionConfigurationRequest"}, + "output":{"shape":"DescribeDefaultEncryptionConfigurationResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ] + }, "DescribeGateway":{ "name":"DescribeGateway", "http":{ @@ -737,6 +751,22 @@ {"shape":"ResourceNotFoundException"} ] }, + "PutDefaultEncryptionConfiguration":{ + "name":"PutDefaultEncryptionConfiguration", + "http":{ + "method":"POST", + "requestUri":"/configuration/account/encryption" + }, + "input":{"shape":"PutDefaultEncryptionConfigurationRequest"}, + "output":{"shape":"PutDefaultEncryptionConfigurationResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"ConflictingOperationException"} + ] + }, "PutLoggingOptions":{ "name":"PutLoggingOptions", "http":{ @@ -1463,6 +1493,33 @@ "min":36, "pattern":"\\S{36,64}" }, + "ConfigurationErrorDetails":{ + "type":"structure", + "required":[ + "code", + "message" + ], + "members":{ + "code":{"shape":"ErrorCode"}, + "message":{"shape":"ErrorMessage"} + } + }, + "ConfigurationState":{ + "type":"string", + "enum":[ + "ACTIVE", + "UPDATE_IN_PROGRESS", + "UPDATE_FAILED" + ] + }, + "ConfigurationStatus":{ + "type":"structure", + "required":["state"], + "members":{ + "state":{"shape":"ConfigurationState"}, + 
"error":{"shape":"ConfigurationErrorDetails"} + } + }, "ConflictingOperationException":{ "type":"structure", "required":[ @@ -2056,6 +2113,23 @@ "dashboardLastUpdateDate":{"shape":"Timestamp"} } }, + "DescribeDefaultEncryptionConfigurationRequest":{ + "type":"structure", + "members":{ + } + }, + "DescribeDefaultEncryptionConfigurationResponse":{ + "type":"structure", + "required":[ + "encryptionType", + "configurationStatus" + ], + "members":{ + "encryptionType":{"shape":"EncryptionType"}, + "kmsKeyArn":{"shape":"ARN"}, + "configurationStatus":{"shape":"ConfigurationStatus"} + } + }, "DescribeGatewayCapabilityConfigurationRequest":{ "type":"structure", "required":[ @@ -2237,6 +2311,13 @@ "min":1, "pattern":"[^@]+@[^@]+" }, + "EncryptionType":{ + "type":"string", + "enum":[ + "SITEWISE_DEFAULT_ENCRYPTION", + "KMS_BASED_ENCRYPTION" + ] + }, "EntryId":{ "type":"string", "max":64, @@ -2601,6 +2682,11 @@ "error":{"httpStatusCode":400}, "exception":true }, + "KmsKeyId":{ + "type":"string", + "max":2048, + "min":1 + }, "LimitExceededException":{ "type":"structure", "required":["message"], @@ -3170,6 +3256,26 @@ "propertyValues":{"shape":"AssetPropertyValues"} } }, + "PutDefaultEncryptionConfigurationRequest":{ + "type":"structure", + "required":["encryptionType"], + "members":{ + "encryptionType":{"shape":"EncryptionType"}, + "kmsKeyId":{"shape":"KmsKeyId"} + } + }, + "PutDefaultEncryptionConfigurationResponse":{ + "type":"structure", + "required":[ + "encryptionType", + "configurationStatus" + ], + "members":{ + "encryptionType":{"shape":"EncryptionType"}, + "kmsKeyArn":{"shape":"ARN"}, + "configurationStatus":{"shape":"ConfigurationStatus"} + } + }, "PutLoggingOptionsRequest":{ "type":"structure", "required":["loggingOptions"], diff --git a/models/apis/iotsitewise/2019-12-02/docs-2.json b/models/apis/iotsitewise/2019-12-02/docs-2.json index 49f03a1ccc0..7a4219e6902 100644 --- a/models/apis/iotsitewise/2019-12-02/docs-2.json +++ 
b/models/apis/iotsitewise/2019-12-02/docs-2.json @@ -12,7 +12,7 @@ "CreateDashboard": "

Creates a dashboard in an AWS IoT SiteWise Monitor project.

", "CreateGateway": "

Creates a gateway, which is a virtual or edge device that delivers industrial data streams from local servers to AWS IoT SiteWise. For more information, see Ingesting data using a gateway in the AWS IoT SiteWise User Guide.

", "CreatePortal": "

Creates a portal, which can contain projects and dashboards. AWS IoT SiteWise Monitor uses AWS SSO or IAM to authenticate portal users and manage user permissions.

Before you can sign in to a new portal, you must add at least one identity to that portal. For more information, see Adding or removing portal administrators in the AWS IoT SiteWise User Guide.

", - "CreatePresignedPortalUrl": "

Creates a pre-signed URL to a portal. Use this operation to create URLs to portals that use AWS Identity and Access Management (IAM) to authenticate users. An IAM user with access to a portal can call this API to get a URL to that portal. The URL contains a session token that lets the IAM user access the portal.

", + "CreatePresignedPortalUrl": "

Creates a pre-signed URL to a portal. Use this operation to create URLs to portals that use AWS Identity and Access Management (IAM) to authenticate users. An IAM user with access to a portal can call this API to get a URL to that portal. The URL contains an authentication token that lets the IAM user access the portal.

", "CreateProject": "

Creates a project in the specified portal.

", "DeleteAccessPolicy": "

Deletes an access policy that grants the specified identity access to the specified AWS IoT SiteWise Monitor resource. You can use this operation to revoke access to an AWS IoT SiteWise Monitor resource.

", "DeleteAsset": "

Deletes an asset. This action can't be undone. For more information, see Deleting assets and models in the AWS IoT SiteWise User Guide.

You can't delete an asset that's associated to another asset. For more information, see DisassociateAssets.

", @@ -26,6 +26,7 @@ "DescribeAssetModel": "

Retrieves information about an asset model.

", "DescribeAssetProperty": "

Retrieves information about an asset property.

When you call this operation for an attribute property, this response includes the default attribute value that you define in the asset model. If you update the default value in the model, this operation's response includes the new default value.

This operation doesn't return the value of the asset property. To get the value of an asset property, use GetAssetPropertyValue.

", "DescribeDashboard": "

Retrieves information about a dashboard.

", + "DescribeDefaultEncryptionConfiguration": "

Retrieves information about the default encryption configuration for the AWS account in the default or specified region. For more information, see Key management in the AWS IoT SiteWise User Guide.

", "DescribeGateway": "

Retrieves information about a gateway.

", "DescribeGatewayCapabilityConfiguration": "

Retrieves information about a gateway capability configuration. Each gateway capability defines data sources for a gateway. A capability configuration can contain multiple data source configurations. If you define OPC-UA sources for a gateway in the AWS IoT SiteWise console, all of your OPC-UA sources are stored in one capability configuration. To list all capability configurations for a gateway, use DescribeGateway.

", "DescribeLoggingOptions": "

Retrieves the current AWS IoT SiteWise logging options.

", @@ -45,6 +46,7 @@ "ListProjectAssets": "

Retrieves a paginated list of assets associated with an AWS IoT SiteWise Monitor project.

", "ListProjects": "

Retrieves a paginated list of projects for an AWS IoT SiteWise Monitor portal.

", "ListTagsForResource": "

Retrieves the list of tags for an AWS IoT SiteWise resource.

", + "PutDefaultEncryptionConfiguration": "

Sets the default encryption configuration for the AWS account. For more information, see Key management in the AWS IoT SiteWise User Guide.

", "PutLoggingOptions": "

Sets logging options for AWS IoT SiteWise.

", "TagResource": "

Adds tags to an AWS IoT SiteWise resource. If a tag already exists for the resource, this operation updates the tag's value.

", "UntagResource": "

Removes a tag from an AWS IoT SiteWise resource.

", @@ -77,6 +79,7 @@ "DescribeAssetModelResponse$assetModelArn": "

The ARN of the asset model, which has the following format.

arn:${Partition}:iotsitewise:${Region}:${Account}:asset-model/${AssetModelId}

", "DescribeAssetResponse$assetArn": "

The ARN of the asset, which has the following format.

arn:${Partition}:iotsitewise:${Region}:${Account}:asset/${AssetId}

", "DescribeDashboardResponse$dashboardArn": "

The ARN of the dashboard, which has the following format.

arn:${Partition}:iotsitewise:${Region}:${Account}:dashboard/${DashboardId}

", + "DescribeDefaultEncryptionConfigurationResponse$kmsKeyArn": "

The key ARN of the customer managed customer master key (CMK) used for AWS KMS encryption if you use KMS_BASED_ENCRYPTION.

", "DescribeGatewayResponse$gatewayArn": "

The ARN of the gateway, which has the following format.

arn:${Partition}:iotsitewise:${Region}:${Account}:gateway/${GatewayId}

", "DescribePortalResponse$portalArn": "

The ARN of the portal, which has the following format.

arn:${Partition}:iotsitewise:${Region}:${Account}:portal/${PortalId}

", "DescribePortalResponse$roleArn": "

The ARN of the service role that allows the portal's users to access your AWS IoT SiteWise resources on your behalf. For more information, see Using service roles for AWS IoT SiteWise Monitor in the AWS IoT SiteWise User Guide.

", @@ -85,6 +88,7 @@ "IAMUserIdentity$arn": "

The ARN of the IAM user. IAM users must have the iotsitewise:CreatePresignedPortalUrl permission to sign in to the portal. For more information, see IAM ARNs in the IAM User Guide.

If you delete the IAM user, access policies that contain this identity include an empty arn. You can delete the access policy for the IAM user that no longer exists.

", "ListAccessPoliciesRequest$iamArn": "

The ARN of the IAM user. For more information, see IAM ARNs in the IAM User Guide. This parameter is required if you specify IAM for identityType.

", "PortalSummary$roleArn": "

The ARN of the service role that allows the portal's users to access your AWS IoT SiteWise resources on your behalf. For more information, see Using service roles for AWS IoT SiteWise Monitor in the AWS IoT SiteWise User Guide.

", + "PutDefaultEncryptionConfigurationResponse$kmsKeyArn": "

The Key ARN of the AWS KMS CMK used for AWS KMS encryption if you use KMS_BASED_ENCRYPTION.

", "UpdatePortalRequest$roleArn": "

The ARN of a service role that allows the portal's users to access your AWS IoT SiteWise resources on your behalf. For more information, see Using service roles for AWS IoT SiteWise Monitor in the AWS IoT SiteWise User Guide.

" } }, @@ -492,6 +496,25 @@ "UpdateProjectRequest$clientToken": "

A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

" } }, + "ConfigurationErrorDetails": { + "base": "

", + "refs": { + "ConfigurationStatus$error": "

" + } + }, + "ConfigurationState": { + "base": null, + "refs": { + "ConfigurationStatus$state": "

" + } + }, + "ConfigurationStatus": { + "base": "

", + "refs": { + "DescribeDefaultEncryptionConfigurationResponse$configurationStatus": "

The status of the account configuration. This contains the ConfigurationState. If there's an error, it also contains the ErrorDetails.

", + "PutDefaultEncryptionConfigurationResponse$configurationStatus": "

The status of the account configuration. This contains the ConfigurationState. If there is an error, it also contains the ErrorDetails.

" + } + }, "ConflictingOperationException": { "base": "

Your request has conflicting operations. This can occur if you're trying to perform more than one operation on the same resource at the same time.

", "refs": { @@ -718,6 +741,16 @@ "refs": { } }, + "DescribeDefaultEncryptionConfigurationRequest": { + "base": null, + "refs": { + } + }, + "DescribeDefaultEncryptionConfigurationResponse": { + "base": null, + "refs": { + } + }, "DescribeGatewayCapabilityConfigurationRequest": { "base": null, "refs": { @@ -802,6 +835,14 @@ "UpdatePortalRequest$portalContactEmail": "

The AWS administrator's contact email address.

" } }, + "EncryptionType": { + "base": null, + "refs": { + "DescribeDefaultEncryptionConfigurationResponse$encryptionType": "

The type of encryption used for the encryption configuration.

", + "PutDefaultEncryptionConfigurationRequest$encryptionType": "

The type of encryption used for the encryption configuration.

", + "PutDefaultEncryptionConfigurationResponse$encryptionType": "

The type of encryption used for the encryption configuration.

" + } + }, "EntryId": { "base": null, "refs": { @@ -812,6 +853,7 @@ "ErrorCode": { "base": null, "refs": { + "ConfigurationErrorDetails$code": "

", "ErrorDetails$code": "

The error code.

" } }, @@ -826,6 +868,7 @@ "base": null, "refs": { "BatchPutAssetPropertyError$errorMessage": "

The associated error message.

", + "ConfigurationErrorDetails$message": "

", "ConflictingOperationException$message": null, "ErrorDetails$message": "

The error message.

", "InternalFailureException$message": null, @@ -1123,6 +1166,12 @@ "refs": { } }, + "KmsKeyId": { + "base": null, + "refs": { + "PutDefaultEncryptionConfigurationRequest$kmsKeyId": "

The Key ID of the customer managed customer master key (CMK) used for AWS KMS encryption. This is required if you use KMS_BASED_ENCRYPTION.

" + } + }, "LimitExceededException": { "base": "

You've reached the limit for a resource. For example, this can occur if you're trying to associate more than the allowed number of child assets or attempting to create more than the allowed number of properties for an asset model.

For more information, see Quotas in the AWS IoT SiteWise User Guide.

", "refs": { @@ -1540,6 +1589,16 @@ "PutAssetPropertyValueEntries$member": null } }, + "PutDefaultEncryptionConfigurationRequest": { + "base": null, + "refs": { + } + }, + "PutDefaultEncryptionConfigurationResponse": { + "base": null, + "refs": { + } + }, "PutLoggingOptionsRequest": { "base": null, "refs": { @@ -1624,7 +1683,7 @@ "SessionDurationSeconds": { "base": null, "refs": { - "CreatePresignedPortalUrlRequest$sessionDurationSeconds": "

The duration (in seconds) for which the session at the URL is valid.

Default: 900 seconds (15 minutes)

" + "CreatePresignedPortalUrlRequest$sessionDurationSeconds": "

The duration (in seconds) for which the session at the URL is valid.

Default: 43,200 seconds (12 hours)

" } }, "TagKey": { @@ -1858,7 +1917,7 @@ "base": null, "refs": { "CreatePortalResponse$portalStartUrl": "

The URL for the AWS IoT SiteWise Monitor portal. You can use this URL to access portals that use AWS SSO for authentication. For portals that use IAM for authentication, you must use the CreatePresignedPortalUrl operation to create a URL that you can use to access the portal.

", - "CreatePresignedPortalUrlResponse$presignedPortalUrl": "

The pre-signed URL to the portal. The URL contains the portal ID and a session token that lets you access the portal. The URL has the following format.

https://<portal-id>.app.iotsitewise.aws/auth?token=<encrypted-token>

", + "CreatePresignedPortalUrlResponse$presignedPortalUrl": "

The pre-signed URL to the portal. The URL contains the portal ID and an authentication token that lets you access the portal. The URL has the following format.

https://<portal-id>.app.iotsitewise.aws/iam?token=<encrypted-token>

", "DescribePortalResponse$portalStartUrl": "

The URL for the AWS IoT SiteWise Monitor portal. You can use this URL to access portals that use AWS SSO for authentication. For portals that use IAM for authentication, you must use the CreatePresignedPortalUrl operation to create a URL that you can use to access the portal.

", "ImageLocation$url": "

The URL where the image is available. The URL is valid for 15 minutes so that you can view and download the image

", "PortalSummary$startUrl": "

The URL for the AWS IoT SiteWise Monitor portal. You can use this URL to access portals that use AWS SSO for authentication. For portals that use IAM for authentication, you must use the CreatePresignedPortalUrl operation to create a URL that you can use to access the portal.

" diff --git a/models/apis/lex-models/2017-04-19/api-2.json b/models/apis/lex-models/2017-04-19/api-2.json index f5cc8e83f13..90116bc0ef0 100644 --- a/models/apis/lex-models/2017-04-19/api-2.json +++ b/models/apis/lex-models/2017-04-19/api-2.json @@ -1965,6 +1965,7 @@ "en-AU", "en-GB", "en-US", + "es-419", "es-ES", "es-US", "fr-FR", diff --git a/models/apis/mediaconvert/2017-08-29/api-2.json b/models/apis/mediaconvert/2017-08-29/api-2.json index ab03c69f424..663d0a5a3b5 100644 --- a/models/apis/mediaconvert/2017-08-29/api-2.json +++ b/models/apis/mediaconvert/2017-08-29/api-2.json @@ -2233,6 +2233,13 @@ "DISABLED" ] }, + "CmfcAudioDuration": { + "type": "string", + "enum": [ + "DEFAULT_CODEC_DURATION", + "MATCH_VIDEO_DURATION" + ] + }, "CmfcScte35Esam": { "type": "string", "enum": [ @@ -2250,6 +2257,10 @@ "CmfcSettings": { "type": "structure", "members": { + "AudioDuration": { + "shape": "CmfcAudioDuration", + "locationName": "audioDuration" + }, "Scte35Esam": { "shape": "CmfcScte35Esam", "locationName": "scte35Esam" @@ -2668,6 +2679,10 @@ "shape": "__integerMin0Max2147483647", "locationName": "minBufferTime" }, + "MinFinalSegmentLength": { + "shape": "__doubleMin0Max2147483647", + "locationName": "minFinalSegmentLength" + }, "MpdProfile": { "shape": "DashIsoMpdProfile", "locationName": "mpdProfile" @@ -4992,7 +5007,7 @@ "locationName": "denoiseFilter" }, "FileInput": { - "shape": "__stringPatternS3MM2PPMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLL", + "shape": 
"__stringPatternS3MM2PPMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLOOGGGGaAHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLOOGGGGaA", "locationName": "fileInput" }, "FilterEnable": { @@ -5959,6 +5974,13 @@ "ATSC" ] }, + "M2tsAudioDuration": { + "type": "string", + "enum": [ + "DEFAULT_CODEC_DURATION", + "MATCH_VIDEO_DURATION" + ] + }, "M2tsBufferModel": { "type": "string", "enum": [ @@ -6056,6 +6078,10 @@ "shape": "M2tsAudioBufferModel", "locationName": "audioBufferModel" }, + "AudioDuration": { + "shape": "M2tsAudioDuration", + "locationName": "audioDuration" + }, "AudioFramesPerPes": { "shape": "__integerMin0Max2147483647", "locationName": "audioFramesPerPes" @@ -6198,6 +6224,13 @@ } } }, + "M3u8AudioDuration": { + "type": "string", + "enum": [ + "DEFAULT_CODEC_DURATION", + "MATCH_VIDEO_DURATION" + ] + }, "M3u8NielsenId3": { "type": "string", "enum": [ @@ -6222,6 +6255,10 @@ "M3u8Settings": { "type": "structure", "members": { + "AudioDuration": { + "shape": "M3u8AudioDuration", + "locationName": "audioDuration" + }, "AudioFramesPerPes": { "shape": "__integerMin0Max2147483647", "locationName": "audioFramesPerPes" @@ -6490,6 +6527,10 @@ "Mp4Settings": { "type": "structure", "members": { + "AudioDuration": { + "shape": "CmfcAudioDuration", + "locationName": "audioDuration" + }, "CslgAtom": { "shape": "Mp4CslgAtom", "locationName": "cslgAtom" @@ -6512,6 +6553,20 @@ } } }, + "MpdAccessibilityCaptionHints": { + "type": "string", + "enum": [ + "INCLUDE", + "EXCLUDE" + ] + }, + "MpdAudioDuration": { + "type": "string", + 
"enum": [ + "DEFAULT_CODEC_DURATION", + "MATCH_VIDEO_DURATION" + ] + }, "MpdCaptionContainerType": { "type": "string", "enum": [ @@ -6536,6 +6591,14 @@ "MpdSettings": { "type": "structure", "members": { + "AccessibilityCaptionHints": { + "shape": "MpdAccessibilityCaptionHints", + "locationName": "accessibilityCaptionHints" + }, + "AudioDuration": { + "shape": "MpdAudioDuration", + "locationName": "audioDuration" + }, "CaptionContainerType": { "shape": "MpdCaptionContainerType", "locationName": "captionContainerType" @@ -9600,9 +9663,9 @@ "type": "string", "pattern": "^s3:\\/\\/.*\\/(ASSETMAP.xml)?$" }, - "__stringPatternS3MM2PPMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLL": { + "__stringPatternS3MM2PPMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLOOGGGGaAHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLOOGGGGaA": { "type": "string", - "pattern": 
"^((s3://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[pP]|[mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][kK][aA]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[wW][eE][bB][mM]|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[xX][mM][lL]))))|(https?://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][kK][aA]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[wW][eE][bB][mM]|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[xX][mM][lL])))(\\?([^&=]+=[^&]+&)*[^&=]+=[^&]+)?))$" + "pattern": 
"^((s3://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[pP]|[mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][kK][aA]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[wW][eE][bB][mM]|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[xX][mM][lL]|[oO][gG][gGaA]))))|(https?://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][kK][aA]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[wW][eE][bB][mM]|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[xX][mM][lL]|[oO][gG][gGaA])))(\\?([^&=]+=[^&]+&)*[^&=]+=[^&]+)?))$" }, "__stringPatternS3MM2PPWWEEBBMMMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEEHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEE": { "type": "string", diff --git 
a/models/apis/mediaconvert/2017-08-29/docs-2.json b/models/apis/mediaconvert/2017-08-29/docs-2.json index 12bef398edf..d640333963b 100644 --- a/models/apis/mediaconvert/2017-08-29/docs-2.json +++ b/models/apis/mediaconvert/2017-08-29/docs-2.json @@ -512,9 +512,9 @@ } }, "CmafClientCache": { - "base": "When set to ENABLED, sets #EXT-X-ALLOW-CACHE:no tag, which prevents client from saving media segments for later replay.", + "base": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled (ENABLED) and control caching in your video distribution set up. For example, use the Cache-Control http header.", "refs": { - "CmafGroupSettings$ClientCache": "When set to ENABLED, sets #EXT-X-ALLOW-CACHE:no tag, which prevents client from saving media segments for later replay." + "CmafGroupSettings$ClientCache": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled (ENABLED) and control caching in your video distribution set up. For example, use the Cache-Control http header." } }, "CmafCodecSpecification": { @@ -601,6 +601,13 @@ "CmafGroupSettings$WriteSegmentTimelineInRepresentation": "When you enable Precise segment duration in DASH manifests (writeSegmentTimelineInRepresentation), your DASH manifest shows precise segment durations. The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. When this feature isn't enabled, the segment durations in your DASH manifest are approximate. The segment duration information appears in the duration attribute of the SegmentTemplate element." } }, + "CmfcAudioDuration": { + "base": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). 
In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", + "refs": { + "CmfcSettings$AudioDuration": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", + "Mp4Settings$AudioDuration": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. 
For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." + } + }, "CmfcScte35Esam": { "base": "Use this setting only when you specify SCTE-35 markers from ESAM. Choose INSERT to put SCTE-35 markers in this output at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML (sccXml).", "refs": { @@ -1602,9 +1609,9 @@ } }, "HlsClientCache": { - "base": "When set to ENABLED, sets #EXT-X-ALLOW-CACHE:no tag, which prevents client from saving media segments for later replay.", + "base": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled (ENABLED) and control caching in your video distribution set up. For example, use the Cache-Control http header.", "refs": { - "HlsGroupSettings$ClientCache": "When set to ENABLED, sets #EXT-X-ALLOW-CACHE:no tag, which prevents client from saving media segments for later replay." + "HlsGroupSettings$ClientCache": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled (ENABLED) and control caching in your video distribution set up. For example, use the Cache-Control http header." 
} }, "HlsCodecSpecification": { @@ -1948,6 +1955,12 @@ "M2tsSettings$AudioBufferModel": "Selects between the DVB and ATSC buffer models for Dolby Digital audio." } }, + "M2tsAudioDuration": { + "base": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", + "refs": { + "M2tsSettings$AudioDuration": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. 
MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." + } + }, "M2tsBufferModel": { "base": "Controls what buffer model to use for accurate interleaving. If set to MULTIPLEX, use multiplex buffer model. If set to NONE, this can lead to lower latency, but low-memory devices may not be able to play back the stream without interruptions.", "refs": { @@ -2026,6 +2039,12 @@ "ContainerSettings$M2tsSettings": "MPEG-2 TS container settings. These apply to outputs in a File output group when the output's container (ContainerType) is MPEG-2 Transport Stream (M2TS). In these assets, data is organized by the program map table (PMT). Each transport stream program contains subsets of data, including audio, video, and metadata. Each of these subsets of data has a numerical label called a packet identifier (PID). Each transport stream program corresponds to one MediaConvert output. The PMT lists the types of data in a program along with their PID. Downstream systems and players use the program map table to look up the PID for each type of data it accesses and then uses the PIDs to locate specific data within the asset." } }, + "M3u8AudioDuration": { + "base": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. 
After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", + "refs": { + "M3u8Settings$AudioDuration": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." + } + }, "M3u8NielsenId3": { "base": "If INSERT, Nielsen inaudible tones for media tracking will be detected in the input audio and an equivalent ID3 tag will be inserted in the output.", "refs": { @@ -2159,6 +2178,18 @@ "ContainerSettings$Mp4Settings": "Settings for MP4 container. You can create audio-only AAC outputs with this container." } }, + "MpdAccessibilityCaptionHints": { + "base": "Optional. Choose Include (INCLUDE) to have MediaConvert mark up your DASH manifest with elements for embedded 608 captions. 
This markup isn't generally required, but some video players require it to discover and play embedded 608 captions. Keep the default value, Exclude (EXCLUDE), to leave these elements out. When you enable this setting, this is the markup that MediaConvert includes in your manifest: ", + "refs": { + "MpdSettings$AccessibilityCaptionHints": "Optional. Choose Include (INCLUDE) to have MediaConvert mark up your DASH manifest with elements for embedded 608 captions. This markup isn't generally required, but some video players require it to discover and play embedded 608 captions. Keep the default value, Exclude (EXCLUDE), to leave these elements out. When you enable this setting, this is the markup that MediaConvert includes in your manifest: " + } + }, + "MpdAudioDuration": { + "base": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", + "refs": { + "MpdSettings$AudioDuration": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. 
For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." + } + }, "MpdCaptionContainerType": { "base": "Use this setting only in DASH output groups that include sidecar TTML or IMSC captions. You specify sidecar captions in a separate output from your audio and video. Choose Raw (RAW) for captions in a single XML file in a raw container. Choose Fragmented MPEG-4 (FRAGMENTED_MP4) for captions in XML format contained within fragmented MP4 files. This set of fragmented MP4 files is separate from your video and audio fragmented MP4 files.", "refs": { @@ -3099,6 +3130,7 @@ "base": null, "refs": { "CmafGroupSettings$MinFinalSegmentLength": "Keep this setting at the default value of 0, unless you are troubleshooting a problem with how devices play back the end of your video asset. If you know that player devices are hanging on the final segment of your video because the length of your final segment is too short, use this setting to specify a minimum final segment length, in seconds. Choose a value that is greater than or equal to 1 and less than your segment length. When you specify a value for this setting, the encoder will combine any final segment that is shorter than the length that you specify with the previous segment. 
For example, your segment length is 3 seconds and your final segment is .5 seconds without a minimum final segment length; when you set the minimum final segment length to 1, your final segment is 3.5 seconds.", + "DashIsoGroupSettings$MinFinalSegmentLength": "Keep this setting at the default value of 0, unless you are troubleshooting a problem with how devices play back the end of your video asset. If you know that player devices are hanging on the final segment of your video because the length of your final segment is too short, use this setting to specify a minimum final segment length, in seconds. Choose a value that is greater than or equal to 1 and less than your segment length. When you specify a value for this setting, the encoder will combine any final segment that is shorter than the length that you specify with the previous segment. For example, your segment length is 3 seconds and your final segment is .5 seconds without a minimum final segment length; when you set the minimum final segment length to 1, your final segment is 3.5 seconds.", "HlsGroupSettings$MinFinalSegmentLength": "Keep this setting at the default value of 0, unless you are troubleshooting a problem with how devices play back the end of your video asset. If you know that player devices are hanging on the final segment of your video because the length of your final segment is too short, use this setting to specify a minimum final segment length, in seconds. Choose a value that is greater than or equal to 1 and less than your segment length. When you specify a value for this setting, the encoder will combine any final segment that is shorter than the length that you specify with the previous segment. For example, your segment length is 3 seconds and your final segment is .5 seconds without a minimum final segment length; when you set the minimum final segment length to 1, your final segment is 3.5 seconds." 
} }, @@ -4086,8 +4118,8 @@ "__mapOf__string": { "base": null, "refs": { - "CreateJobRequest$Tags": "Optional. The tags that you want to add to the resource. You can tag resources with a key-value pair or with only a key.", - "CreateJobRequest$UserMetadata": "Optional. User-defined metadata that you want to associate with an MediaConvert job. You specify metadata in key/value pairs.", + "CreateJobRequest$Tags": "Optional. The tags that you want to add to the resource. You can tag resources with a key-value pair or with only a key. Use standard AWS tags on your job for automatic integration with AWS services and for custom integrations and workflows.", + "CreateJobRequest$UserMetadata": "Optional. User-defined metadata that you want to associate with an MediaConvert job. You specify metadata in key/value pairs. Use only for existing integrations or workflows that rely on job metadata tags. Otherwise, we recommend that you use standard AWS tags.", "CreateJobTemplateRequest$Tags": "The tags that you want to add to the resource. You can tag resources with a key-value pair or with only a key.", "CreatePresetRequest$Tags": "The tags that you want to add to the resource. You can tag resources with a key-value pair or with only a key.", "CreateQueueRequest$Tags": "The tags that you want to add to the resource. 
You can tag resources with a key-value pair or with only a key.", @@ -4427,7 +4459,7 @@ "__listOf__stringPatternS3ASSETMAPXml$member": null } }, - "__stringPatternS3MM2PPMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLL": { + "__stringPatternS3MM2PPMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLOOGGGGaAHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLOOGGGGaA": { "base": null, "refs": { "Input$FileInput": "Specify the source file for your transcoding job. You can use multiple inputs in a single job. The service concatenates these inputs, in the order that you specify them in the job, to create the outputs. If your input format is IMF, specify your input by providing the path to your CPL. For example, \"s3://bucket/vf/cpl.xml\". If the CPL is in an incomplete IMP, make sure to use *Supplemental IMPs* (SupplementalImps) to specify any supplemental IMPs that contain assets referenced by the CPL." 
diff --git a/models/apis/mwaa/2020-07-01/api-2.json b/models/apis/mwaa/2020-07-01/api-2.json new file mode 100644 index 00000000000..f0671d9d7d4 --- /dev/null +++ b/models/apis/mwaa/2020-07-01/api-2.json @@ -0,0 +1,894 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-07-01", + "endpointPrefix":"airflow", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"AmazonMWAA", + "serviceId":"MWAA", + "signatureVersion":"v4", + "signingName":"airflow", + "uid":"mwaa-2020-07-01" + }, + "operations":{ + "CreateCliToken":{ + "name":"CreateCliToken", + "http":{ + "method":"POST", + "requestUri":"/clitoken/{Name}", + "responseCode":200 + }, + "input":{"shape":"CreateCliTokenRequest"}, + "output":{"shape":"CreateCliTokenResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "endpoint":{"hostPrefix":"env."} + }, + "CreateEnvironment":{ + "name":"CreateEnvironment", + "http":{ + "method":"PUT", + "requestUri":"/environments/{Name}", + "responseCode":200 + }, + "input":{"shape":"CreateEnvironmentInput"}, + "output":{"shape":"CreateEnvironmentOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "endpoint":{"hostPrefix":"api."}, + "idempotent":true + }, + "CreateWebLoginToken":{ + "name":"CreateWebLoginToken", + "http":{ + "method":"POST", + "requestUri":"/webtoken/{Name}", + "responseCode":200 + }, + "input":{"shape":"CreateWebLoginTokenRequest"}, + "output":{"shape":"CreateWebLoginTokenResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "endpoint":{"hostPrefix":"env."}, + "idempotent":true + }, + "DeleteEnvironment":{ + "name":"DeleteEnvironment", + "http":{ + "method":"DELETE", + "requestUri":"/environments/{Name}", + "responseCode":200 + }, + "input":{"shape":"DeleteEnvironmentInput"}, + "output":{"shape":"DeleteEnvironmentOutput"}, + "errors":[ + 
{"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "endpoint":{"hostPrefix":"api."}, + "idempotent":true + }, + "GetEnvironment":{ + "name":"GetEnvironment", + "http":{ + "method":"GET", + "requestUri":"/environments/{Name}", + "responseCode":200 + }, + "input":{"shape":"GetEnvironmentInput"}, + "output":{"shape":"GetEnvironmentOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "endpoint":{"hostPrefix":"api."} + }, + "ListEnvironments":{ + "name":"ListEnvironments", + "http":{ + "method":"GET", + "requestUri":"/environments", + "responseCode":200 + }, + "input":{"shape":"ListEnvironmentsInput"}, + "output":{"shape":"ListEnvironmentsOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "endpoint":{"hostPrefix":"api."} + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceInput"}, + "output":{"shape":"ListTagsForResourceOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "endpoint":{"hostPrefix":"api."} + }, + "PublishMetrics":{ + "name":"PublishMetrics", + "http":{ + "method":"POST", + "requestUri":"/metrics/environments/{EnvironmentName}", + "responseCode":200 + }, + "input":{"shape":"PublishMetricsInput"}, + "output":{"shape":"PublishMetricsOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "endpoint":{"hostPrefix":"ops."} + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"TagResourceInput"}, + "output":{"shape":"TagResourceOutput"}, + "errors":[ + 
{"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "endpoint":{"hostPrefix":"api."} + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"UntagResourceInput"}, + "output":{"shape":"UntagResourceOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "endpoint":{"hostPrefix":"api."}, + "idempotent":true + }, + "UpdateEnvironment":{ + "name":"UpdateEnvironment", + "http":{ + "method":"PATCH", + "requestUri":"/environments/{Name}", + "responseCode":200 + }, + "input":{"shape":"UpdateEnvironmentInput"}, + "output":{"shape":"UpdateEnvironmentOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "endpoint":{"hostPrefix":"api."} + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "AirflowConfigurationOptions":{ + "type":"map", + "key":{"shape":"ConfigKey"}, + "value":{"shape":"ConfigValue"} + }, + "AirflowVersion":{ + "type":"string", + "max":32, + "min":1, + "pattern":"^[0-9a-z.]+$" + }, + "CloudWatchLogGroupArn":{ + "type":"string", + "max":1224, + "min":1, + "pattern":"^arn:aws(-[a-z]+)?:logs:[a-z0-9\\-]+:\\d{12}:log-group:\\w+" + }, + "ConfigKey":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-z]+([a-z._]*[a-z]+)?$" + }, + "ConfigValue":{ + "type":"string", + "max":256, + "min":1, + "pattern":".*" + }, + "CreateCliTokenRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"EnvironmentName", + "location":"uri", + "locationName":"Name" + } + } + }, + "CreateCliTokenResponse":{ + "type":"structure", + "members":{ + 
"CliToken":{"shape":"SyntheticCreateCliTokenResponseToken"}, + "WebServerHostname":{"shape":"Hostname"} + } + }, + "CreateEnvironmentInput":{ + "type":"structure", + "required":[ + "DagS3Path", + "ExecutionRoleArn", + "Name", + "NetworkConfiguration", + "SourceBucketArn" + ], + "members":{ + "AirflowConfigurationOptions":{"shape":"SyntheticCreateEnvironmentInputAirflowConfigurationOptions"}, + "AirflowVersion":{"shape":"AirflowVersion"}, + "DagS3Path":{"shape":"RelativePath"}, + "EnvironmentClass":{"shape":"EnvironmentClass"}, + "ExecutionRoleArn":{"shape":"IamRoleArn"}, + "KmsKey":{"shape":"KmsKey"}, + "LoggingConfiguration":{"shape":"LoggingConfigurationInput"}, + "MaxWorkers":{"shape":"MaxWorkers"}, + "Name":{ + "shape":"EnvironmentName", + "location":"uri", + "locationName":"Name" + }, + "NetworkConfiguration":{"shape":"NetworkConfiguration"}, + "PluginsS3ObjectVersion":{"shape":"S3ObjectVersion"}, + "PluginsS3Path":{"shape":"RelativePath"}, + "RequirementsS3ObjectVersion":{"shape":"S3ObjectVersion"}, + "RequirementsS3Path":{"shape":"RelativePath"}, + "SourceBucketArn":{"shape":"S3BucketArn"}, + "Tags":{"shape":"TagMap"}, + "WebserverAccessMode":{"shape":"WebserverAccessMode"}, + "WeeklyMaintenanceWindowStart":{"shape":"WeeklyMaintenanceWindowStart"} + } + }, + "CreateEnvironmentOutput":{ + "type":"structure", + "members":{ + "Arn":{"shape":"EnvironmentArn"} + } + }, + "CreateWebLoginTokenRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"EnvironmentName", + "location":"uri", + "locationName":"Name" + } + } + }, + "CreateWebLoginTokenResponse":{ + "type":"structure", + "members":{ + "WebServerHostname":{"shape":"Hostname"}, + "WebToken":{"shape":"SyntheticCreateWebLoginTokenResponseToken"} + } + }, + "CreatedAt":{"type":"timestamp"}, + "DeleteEnvironmentInput":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"EnvironmentName", + "location":"uri", + "locationName":"Name" + } + } + }, + 
"DeleteEnvironmentOutput":{ + "type":"structure", + "members":{ + } + }, + "Dimension":{ + "type":"structure", + "required":[ + "Name", + "Value" + ], + "members":{ + "Name":{"shape":"String"}, + "Value":{"shape":"String"} + } + }, + "Dimensions":{ + "type":"list", + "member":{"shape":"Dimension"} + }, + "Double":{ + "type":"double", + "box":true + }, + "Environment":{ + "type":"structure", + "members":{ + "AirflowConfigurationOptions":{"shape":"AirflowConfigurationOptions"}, + "AirflowVersion":{"shape":"AirflowVersion"}, + "Arn":{"shape":"EnvironmentArn"}, + "CreatedAt":{"shape":"CreatedAt"}, + "DagS3Path":{"shape":"RelativePath"}, + "EnvironmentClass":{"shape":"EnvironmentClass"}, + "ExecutionRoleArn":{"shape":"IamRoleArn"}, + "KmsKey":{"shape":"KmsKey"}, + "LastUpdate":{"shape":"LastUpdate"}, + "LoggingConfiguration":{"shape":"LoggingConfiguration"}, + "MaxWorkers":{"shape":"MaxWorkers"}, + "Name":{"shape":"EnvironmentName"}, + "NetworkConfiguration":{"shape":"NetworkConfiguration"}, + "PluginsS3ObjectVersion":{"shape":"S3ObjectVersion"}, + "PluginsS3Path":{"shape":"RelativePath"}, + "RequirementsS3ObjectVersion":{"shape":"S3ObjectVersion"}, + "RequirementsS3Path":{"shape":"RelativePath"}, + "ServiceRoleArn":{"shape":"IamRoleArn"}, + "SourceBucketArn":{"shape":"S3BucketArn"}, + "Status":{"shape":"EnvironmentStatus"}, + "Tags":{"shape":"TagMap"}, + "WebserverAccessMode":{"shape":"WebserverAccessMode"}, + "WebserverUrl":{"shape":"WebserverUrl"}, + "WeeklyMaintenanceWindowStart":{"shape":"WeeklyMaintenanceWindowStart"} + } + }, + "EnvironmentArn":{ + "type":"string", + "max":1224, + "min":1, + "pattern":"^arn:aws(-[a-z]+)?:airflow:[a-z0-9\\-]+:\\d{12}:environment/\\w+" + }, + "EnvironmentClass":{ + "type":"string", + "max":1024, + "min":1 + }, + "EnvironmentList":{ + "type":"list", + "member":{"shape":"EnvironmentName"} + }, + "EnvironmentName":{ + "type":"string", + "max":80, + "min":1, + "pattern":"^[a-zA-Z][0-9a-zA-Z-_]*$" + }, + "EnvironmentStatus":{ + 
"type":"string", + "enum":[ + "CREATING", + "CREATE_FAILED", + "AVAILABLE", + "UPDATING", + "DELETING", + "DELETED" + ] + }, + "ErrorCode":{"type":"string"}, + "ErrorMessage":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^.+$" + }, + "GetEnvironmentInput":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"EnvironmentName", + "location":"uri", + "locationName":"Name" + } + } + }, + "GetEnvironmentOutput":{ + "type":"structure", + "members":{ + "Environment":{"shape":"Environment"} + } + }, + "Hostname":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\\-]*[A-Za-z0-9])$" + }, + "IamRoleArn":{ + "type":"string", + "max":1224, + "min":1, + "pattern":"^arn:aws(-[a-z]+)?:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+$" + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "KmsKey":{ + "type":"string", + "max":1224, + "min":1, + "pattern":"^(((arn:aws(-[a-z]+)?:kms:[a-z]{2}-[a-z]+-\\d:\\d+:)?key\\/)?[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}|(arn:aws(-[a-z]+)?:kms:[a-z]{2}-[a-z]+-\\d:\\d+:)?alias/.+)$" + }, + "LastUpdate":{ + "type":"structure", + "members":{ + "CreatedAt":{"shape":"UpdateCreatedAt"}, + "Error":{"shape":"UpdateError"}, + "Status":{"shape":"UpdateStatus"} + } + }, + "ListEnvironmentsInput":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"ListEnvironmentsInputMaxResultsInteger", + "location":"querystring", + "locationName":"MaxResults" + }, + "NextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"NextToken" + } + } + }, + "ListEnvironmentsInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":25, + "min":1 + }, + "ListEnvironmentsOutput":{ + "type":"structure", + 
"required":["Environments"], + "members":{ + "Environments":{"shape":"EnvironmentList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListTagsForResourceInput":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"EnvironmentArn", + "location":"uri", + "locationName":"ResourceArn" + } + } + }, + "ListTagsForResourceOutput":{ + "type":"structure", + "members":{ + "Tags":{"shape":"TagMap"} + } + }, + "LoggingConfiguration":{ + "type":"structure", + "members":{ + "DagProcessingLogs":{"shape":"ModuleLoggingConfiguration"}, + "SchedulerLogs":{"shape":"ModuleLoggingConfiguration"}, + "TaskLogs":{"shape":"ModuleLoggingConfiguration"}, + "WebserverLogs":{"shape":"ModuleLoggingConfiguration"}, + "WorkerLogs":{"shape":"ModuleLoggingConfiguration"} + } + }, + "LoggingConfigurationInput":{ + "type":"structure", + "members":{ + "DagProcessingLogs":{"shape":"ModuleLoggingConfigurationInput"}, + "SchedulerLogs":{"shape":"ModuleLoggingConfigurationInput"}, + "TaskLogs":{"shape":"ModuleLoggingConfigurationInput"}, + "WebserverLogs":{"shape":"ModuleLoggingConfigurationInput"}, + "WorkerLogs":{"shape":"ModuleLoggingConfigurationInput"} + } + }, + "LoggingEnabled":{ + "type":"boolean", + "box":true + }, + "LoggingLevel":{ + "type":"string", + "enum":[ + "CRITICAL", + "ERROR", + "WARNING", + "INFO", + "DEBUG" + ] + }, + "MaxWorkers":{ + "type":"integer", + "box":true, + "min":1 + }, + "MetricData":{ + "type":"list", + "member":{"shape":"MetricDatum"} + }, + "MetricDatum":{ + "type":"structure", + "required":[ + "MetricName", + "Timestamp" + ], + "members":{ + "Dimensions":{"shape":"Dimensions"}, + "MetricName":{"shape":"String"}, + "StatisticValues":{"shape":"StatisticSet"}, + "Timestamp":{"shape":"Timestamp"}, + "Unit":{"shape":"Unit"}, + "Value":{"shape":"Double"} + } + }, + "ModuleLoggingConfiguration":{ + "type":"structure", + "members":{ + "CloudWatchLogGroupArn":{"shape":"CloudWatchLogGroupArn"}, + 
"Enabled":{"shape":"LoggingEnabled"}, + "LogLevel":{"shape":"LoggingLevel"} + } + }, + "ModuleLoggingConfigurationInput":{ + "type":"structure", + "required":[ + "Enabled", + "LogLevel" + ], + "members":{ + "Enabled":{"shape":"LoggingEnabled"}, + "LogLevel":{"shape":"LoggingLevel"} + } + }, + "NetworkConfiguration":{ + "type":"structure", + "members":{ + "SecurityGroupIds":{"shape":"SecurityGroupList"}, + "SubnetIds":{"shape":"SubnetList"} + } + }, + "NextToken":{ + "type":"string", + "max":2048, + "min":0 + }, + "PublishMetricsInput":{ + "type":"structure", + "required":[ + "EnvironmentName", + "MetricData" + ], + "members":{ + "EnvironmentName":{ + "shape":"EnvironmentName", + "location":"uri", + "locationName":"EnvironmentName" + }, + "MetricData":{"shape":"MetricData"} + } + }, + "PublishMetricsOutput":{ + "type":"structure", + "members":{ + } + }, + "RelativePath":{ + "type":"string", + "max":1024, + "min":1, + "pattern":".*" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "S3BucketArn":{ + "type":"string", + "max":1224, + "min":1, + "pattern":"^arn:aws(-[a-z]+)?:s3:::airflow-[a-z0-9.\\-]+$" + }, + "S3ObjectVersion":{ + "type":"string", + "max":1024, + "min":1 + }, + "SecurityGroupId":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^sg-[a-zA-Z0-9\\-._]+$" + }, + "SecurityGroupList":{ + "type":"list", + "member":{"shape":"SecurityGroupId"}, + "max":5, + "min":1 + }, + "StatisticSet":{ + "type":"structure", + "members":{ + "Maximum":{"shape":"Double"}, + "Minimum":{"shape":"Double"}, + "SampleCount":{"shape":"Integer"}, + "Sum":{"shape":"Double"} + } + }, + "String":{"type":"string"}, + "SubnetId":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^subnet-[a-zA-Z0-9\\-._]+$" + }, + "SubnetList":{ + "type":"list", + "member":{"shape":"SubnetId"}, + "max":2, + "min":2 + }, + 
"SyntheticCreateCliTokenResponseToken":{ + "type":"string", + "sensitive":true + }, + "SyntheticCreateEnvironmentInputAirflowConfigurationOptions":{ + "type":"map", + "key":{"shape":"ConfigKey"}, + "value":{"shape":"ConfigValue"}, + "sensitive":true + }, + "SyntheticCreateWebLoginTokenResponseToken":{ + "type":"string", + "sensitive":true + }, + "SyntheticUpdateEnvironmentInputAirflowConfigurationOptions":{ + "type":"map", + "key":{"shape":"ConfigKey"}, + "value":{"shape":"ConfigValue"}, + "sensitive":true + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":0 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":1 + }, + "TagResourceInput":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"EnvironmentArn", + "location":"uri", + "locationName":"ResourceArn" + }, + "Tags":{"shape":"TagMap"} + } + }, + "TagResourceOutput":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "Timestamp":{"type":"timestamp"}, + "Unit":{ + "type":"string", + "enum":[ + "Seconds", + "Microseconds", + "Milliseconds", + "Bytes", + "Kilobytes", + "Megabytes", + "Gigabytes", + "Terabytes", + "Bits", + "Kilobits", + "Megabits", + "Gigabits", + "Terabits", + "Percent", + "Count", + "Bytes/Second", + "Kilobytes/Second", + "Megabytes/Second", + "Gigabytes/Second", + "Terabytes/Second", + "Bits/Second", + "Kilobits/Second", + "Megabits/Second", + "Gigabits/Second", + "Terabits/Second", + "Count/Second", + "None" + ] + }, + "UntagResourceInput":{ + "type":"structure", + "required":[ + "ResourceArn", + "tagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"EnvironmentArn", + "location":"uri", + "locationName":"ResourceArn" 
+ }, + "tagKeys":{ + "shape":"TagKeyList", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceOutput":{ + "type":"structure", + "members":{ + } + }, + "UpdateCreatedAt":{"type":"timestamp"}, + "UpdateEnvironmentInput":{ + "type":"structure", + "required":["Name"], + "members":{ + "AirflowConfigurationOptions":{"shape":"SyntheticUpdateEnvironmentInputAirflowConfigurationOptions"}, + "AirflowVersion":{"shape":"AirflowVersion"}, + "DagS3Path":{"shape":"RelativePath"}, + "EnvironmentClass":{"shape":"EnvironmentClass"}, + "ExecutionRoleArn":{"shape":"IamRoleArn"}, + "LoggingConfiguration":{"shape":"LoggingConfigurationInput"}, + "MaxWorkers":{"shape":"MaxWorkers"}, + "Name":{ + "shape":"EnvironmentName", + "location":"uri", + "locationName":"Name" + }, + "NetworkConfiguration":{"shape":"UpdateNetworkConfigurationInput"}, + "PluginsS3ObjectVersion":{"shape":"S3ObjectVersion"}, + "PluginsS3Path":{"shape":"RelativePath"}, + "RequirementsS3ObjectVersion":{"shape":"S3ObjectVersion"}, + "RequirementsS3Path":{"shape":"RelativePath"}, + "SourceBucketArn":{"shape":"S3BucketArn"}, + "WebserverAccessMode":{"shape":"WebserverAccessMode"}, + "WeeklyMaintenanceWindowStart":{"shape":"WeeklyMaintenanceWindowStart"} + } + }, + "UpdateEnvironmentOutput":{ + "type":"structure", + "members":{ + "Arn":{"shape":"EnvironmentArn"} + } + }, + "UpdateError":{ + "type":"structure", + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "ErrorMessage":{"shape":"ErrorMessage"} + } + }, + "UpdateNetworkConfigurationInput":{ + "type":"structure", + "required":["SecurityGroupIds"], + "members":{ + "SecurityGroupIds":{"shape":"SecurityGroupList"} + } + }, + "UpdateStatus":{ + "type":"string", + "enum":[ + "SUCCESS", + "PENDING", + "FAILED" + ] + }, + "ValidationException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "WebserverAccessMode":{ + 
"type":"string", + "enum":[ + "PRIVATE_ONLY", + "PUBLIC_ONLY" + ] + }, + "WebserverUrl":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^https://.+$" + }, + "WeeklyMaintenanceWindowStart":{ + "type":"string", + "max":9, + "min":1, + "pattern":"(MON|TUE|WED|THU|FRI|SAT|SUN):([01]\\d|2[0-3]):(00|30)" + } + } +} diff --git a/models/apis/mwaa/2020-07-01/docs-2.json b/models/apis/mwaa/2020-07-01/docs-2.json new file mode 100644 index 00000000000..3ddd72f0d44 --- /dev/null +++ b/models/apis/mwaa/2020-07-01/docs-2.json @@ -0,0 +1,583 @@ +{ + "version": "2.0", + "service": "

Amazon Managed Workflows for Apache Airflow

This section contains the Amazon Managed Workflows for Apache Airflow (MWAA) API reference documentation. For more information, see What Is Amazon MWAA?.

", + "operations": { + "CreateCliToken": "

Create a CLI token to use Airflow CLI.

", + "CreateEnvironment": "

JSON blob that describes the environment to create.

", + "CreateWebLoginToken": "

Create a JWT token to be used to login to Airflow Web UI with claims based Authentication.

", + "DeleteEnvironment": "

Delete an existing environment.

", + "GetEnvironment": "

Get details of an existing environment.

", + "ListEnvironments": "

List Amazon MWAA Environments.

", + "ListTagsForResource": "

List the tags for MWAA environments.

", + "PublishMetrics": "

An operation for publishing metrics from the customers to the Ops plane.

", + "TagResource": "

Add tag to the MWAA environments.

", + "UntagResource": "

Remove a tag from the MWAA environments.

", + "UpdateEnvironment": "

Update an MWAA environment.

" + }, + "shapes": { + "AccessDeniedException": { + "base": "

Access to the Airflow Web UI or CLI has been Denied. Please follow the MWAA user guide to setup permissions to access the Web UI and CLI functionality.

", + "refs": { + } + }, + "AirflowConfigurationOptions": { + "base": null, + "refs": { + "Environment$AirflowConfigurationOptions": "

The Airflow Configuration Options of the Amazon MWAA Environment.

" + } + }, + "AirflowVersion": { + "base": null, + "refs": { + "CreateEnvironmentInput$AirflowVersion": "

The Apache Airflow version you want to use for your environment.

", + "Environment$AirflowVersion": "

The Airflow Version of the Amazon MWAA Environment.

", + "UpdateEnvironmentInput$AirflowVersion": "

The Airflow Version to update of your Amazon MWAA environment.

" + } + }, + "CloudWatchLogGroupArn": { + "base": null, + "refs": { + "ModuleLoggingConfiguration$CloudWatchLogGroupArn": "

Provides the ARN for the CloudWatch group where the logs will be published.

" + } + }, + "ConfigKey": { + "base": null, + "refs": { + "AirflowConfigurationOptions$key": null, + "SyntheticCreateEnvironmentInputAirflowConfigurationOptions$key": null, + "SyntheticUpdateEnvironmentInputAirflowConfigurationOptions$key": null + } + }, + "ConfigValue": { + "base": null, + "refs": { + "AirflowConfigurationOptions$value": null, + "SyntheticCreateEnvironmentInputAirflowConfigurationOptions$value": null, + "SyntheticUpdateEnvironmentInputAirflowConfigurationOptions$value": null + } + }, + "CreateCliTokenRequest": { + "base": null, + "refs": { + } + }, + "CreateCliTokenResponse": { + "base": null, + "refs": { + } + }, + "CreateEnvironmentInput": { + "base": "

This section contains the Amazon Managed Workflows for Apache Airflow (MWAA) API reference documentation to create an environment. For more information, see Get started with Amazon Managed Workflows for Apache Airflow.

", + "refs": { + } + }, + "CreateEnvironmentOutput": { + "base": null, + "refs": { + } + }, + "CreateWebLoginTokenRequest": { + "base": null, + "refs": { + } + }, + "CreateWebLoginTokenResponse": { + "base": null, + "refs": { + } + }, + "CreatedAt": { + "base": null, + "refs": { + "Environment$CreatedAt": "

The Created At date of the Amazon MWAA Environment.

" + } + }, + "DeleteEnvironmentInput": { + "base": null, + "refs": { + } + }, + "DeleteEnvironmentOutput": { + "base": null, + "refs": { + } + }, + "Dimension": { + "base": "

Internal only API.

", + "refs": { + "Dimensions$member": null + } + }, + "Dimensions": { + "base": null, + "refs": { + "MetricDatum$Dimensions": "

Internal only API.

" + } + }, + "Double": { + "base": null, + "refs": { + "MetricDatum$Value": "

Internal only API.

", + "StatisticSet$Maximum": "

Internal only API.

", + "StatisticSet$Minimum": "

Internal only API.

", + "StatisticSet$Sum": "

Internal only API.

" + } + }, + "Environment": { + "base": "

An Amazon MWAA environment.

", + "refs": { + "GetEnvironmentOutput$Environment": "

A JSON blob with environment details.

" + } + }, + "EnvironmentArn": { + "base": null, + "refs": { + "CreateEnvironmentOutput$Arn": "

The resulting Amazon MWAA environment ARN.

", + "Environment$Arn": "

The ARN of the Amazon MWAA Environment.

", + "ListTagsForResourceInput$ResourceArn": "

The ARN of the MWAA environment.

", + "TagResourceInput$ResourceArn": "

The tag resource ARN of the MWAA environments.

", + "UntagResourceInput$ResourceArn": "

The tag resource ARN of the MWAA environments.

", + "UpdateEnvironmentOutput$Arn": "

The ARN to update of your Amazon MWAA environment.

" + } + }, + "EnvironmentClass": { + "base": null, + "refs": { + "CreateEnvironmentInput$EnvironmentClass": "

The environment class you want to use for your environment. The environment class determines the size of the containers and database used for your Apache Airflow services.

", + "Environment$EnvironmentClass": "

The Environment Class (size) of the Amazon MWAA Environment.

", + "UpdateEnvironmentInput$EnvironmentClass": "

The Environment Class to update of your Amazon MWAA environment.

" + } + }, + "EnvironmentList": { + "base": null, + "refs": { + "ListEnvironmentsOutput$Environments": "

The list of Amazon MWAA Environments.

" + } + }, + "EnvironmentName": { + "base": null, + "refs": { + "CreateCliTokenRequest$Name": "

Create a CLI token request for a MWAA environment.

", + "CreateEnvironmentInput$Name": "

The name of your MWAA environment.

", + "CreateWebLoginTokenRequest$Name": "

Create an Airflow Web UI login token request for a MWAA environment.

", + "DeleteEnvironmentInput$Name": "

The name of the environment to delete.

", + "Environment$Name": "

The name of the Amazon MWAA Environment.

", + "EnvironmentList$member": null, + "GetEnvironmentInput$Name": "

The name of the environment to retrieve.

", + "PublishMetricsInput$EnvironmentName": "

Publishes environment metric data to Amazon CloudWatch.

", + "UpdateEnvironmentInput$Name": "

The name of your Amazon MWAA environment that you wish to update.

" + } + }, + "EnvironmentStatus": { + "base": null, + "refs": { + "Environment$Status": "

The status of the Amazon MWAA Environment.

" + } + }, + "ErrorCode": { + "base": null, + "refs": { + "UpdateError$ErrorCode": "

Error code of update.

" + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "UpdateError$ErrorMessage": "

Error message of update.

" + } + }, + "GetEnvironmentInput": { + "base": null, + "refs": { + } + }, + "GetEnvironmentOutput": { + "base": null, + "refs": { + } + }, + "Hostname": { + "base": null, + "refs": { + "CreateCliTokenResponse$WebServerHostname": "

Create an Airflow CLI login token response for the provided webserver hostname.

", + "CreateWebLoginTokenResponse$WebServerHostname": "

Create an Airflow Web UI login token response for the provided webserver hostname.

" + } + }, + "IamRoleArn": { + "base": null, + "refs": { + "CreateEnvironmentInput$ExecutionRoleArn": "

The Amazon Resource Name (ARN) of the execution role for your environment. An execution role is an AWS Identity and Access Management (IAM) role that grants MWAA permission to access AWS services and resources used by your environment. For example, arn:aws:iam::123456789:role/my-execution-role. For more information, see Managing access to Amazon Managed Workflows for Apache Airflow.

", + "Environment$ExecutionRoleArn": "

The Execution Role ARN of the Amazon MWAA Environment.

", + "Environment$ServiceRoleArn": "

The Service Role ARN of the Amazon MWAA Environment.

", + "UpdateEnvironmentInput$ExecutionRoleArn": "

The Execution Role ARN to update of your Amazon MWAA environment.

" + } + }, + "Integer": { + "base": null, + "refs": { + "StatisticSet$SampleCount": "

Internal only API.

" + } + }, + "InternalServerException": { + "base": "

InternalServerException: An internal error has occurred.

", + "refs": { + } + }, + "KmsKey": { + "base": null, + "refs": { + "CreateEnvironmentInput$KmsKey": "

The AWS Key Management Service (KMS) key to encrypt and decrypt the data in your environment. You can use an AWS KMS key managed by MWAA, or a custom KMS key (advanced). For more information, see Customer master keys (CMKs) in the AWS KMS developer guide.

", + "Environment$KmsKey": "

The Kms Key of the Amazon MWAA Environment.

" + } + }, + "LastUpdate": { + "base": "

Last update information for the environment.

", + "refs": { + "Environment$LastUpdate": null + } + }, + "ListEnvironmentsInput": { + "base": null, + "refs": { + } + }, + "ListEnvironmentsInputMaxResultsInteger": { + "base": null, + "refs": { + "ListEnvironmentsInput$MaxResults": "

The maximum results when listing MWAA environments.

" + } + }, + "ListEnvironmentsOutput": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceInput": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceOutput": { + "base": null, + "refs": { + } + }, + "LoggingConfiguration": { + "base": "

The Logging Configuration of your Amazon MWAA environment.

", + "refs": { + "Environment$LoggingConfiguration": "

The Logging Configuration of the Amazon MWAA Environment.

" + } + }, + "LoggingConfigurationInput": { + "base": "

The Logging Configuration of your Amazon MWAA environment.

", + "refs": { + "CreateEnvironmentInput$LoggingConfiguration": "

The Apache Airflow logs you want to send to Amazon CloudWatch Logs.

", + "UpdateEnvironmentInput$LoggingConfiguration": "

The Logging Configuration to update of your Amazon MWAA environment.

" + } + }, + "LoggingEnabled": { + "base": null, + "refs": { + "ModuleLoggingConfiguration$Enabled": "

Defines that the logging module is enabled.

", + "ModuleLoggingConfigurationInput$Enabled": "

Defines that the logging module is enabled.

" + } + }, + "LoggingLevel": { + "base": null, + "refs": { + "ModuleLoggingConfiguration$LogLevel": "

Defines the log level, which can be CRITICAL, ERROR, WARNING, or INFO.

", + "ModuleLoggingConfigurationInput$LogLevel": "

Defines the log level, which can be CRITICAL, ERROR, WARNING, or INFO.

" + } + }, + "MaxWorkers": { + "base": null, + "refs": { + "CreateEnvironmentInput$MaxWorkers": "

The maximum number of workers that you want to run in your environment. MWAA scales the number of Apache Airflow workers and the Fargate containers that run your tasks up to the number you specify in this field. When there are no more tasks running, and no more in the queue, MWAA disposes of the extra containers leaving the one worker that is included with your environment.

", + "Environment$MaxWorkers": "

The Maximum Workers of the Amazon MWAA Environment.

", + "UpdateEnvironmentInput$MaxWorkers": "

The Maximum Workers to update of your Amazon MWAA environment.

" + } + }, + "MetricData": { + "base": null, + "refs": { + "PublishMetricsInput$MetricData": "

Publishes metric data points to Amazon CloudWatch. CloudWatch associates the data points with the specified metric.

" + } + }, + "MetricDatum": { + "base": "

Internal only API.

", + "refs": { + "MetricData$member": null + } + }, + "ModuleLoggingConfiguration": { + "base": "

A JSON blob that provides configuration to use for logging with respect to the various Apache Airflow services: DagProcessingLogs, SchedulerLogs, TaskLogs, WebserverLogs, and WorkerLogs.

", + "refs": { + "LoggingConfiguration$DagProcessingLogs": null, + "LoggingConfiguration$SchedulerLogs": null, + "LoggingConfiguration$TaskLogs": null, + "LoggingConfiguration$WebserverLogs": null, + "LoggingConfiguration$WorkerLogs": null + } + }, + "ModuleLoggingConfigurationInput": { + "base": "

A JSON blob that provides configuration to use for logging with respect to the various Apache Airflow services: DagProcessingLogs, SchedulerLogs, TaskLogs, WebserverLogs, and WorkerLogs.

", + "refs": { + "LoggingConfigurationInput$DagProcessingLogs": null, + "LoggingConfigurationInput$SchedulerLogs": null, + "LoggingConfigurationInput$TaskLogs": null, + "LoggingConfigurationInput$WebserverLogs": null, + "LoggingConfigurationInput$WorkerLogs": null + } + }, + "NetworkConfiguration": { + "base": "

Provide the security group and subnet IDs for the workers and scheduler.

", + "refs": { + "CreateEnvironmentInput$NetworkConfiguration": "

The VPC networking components you want to use for your environment. At least two private subnet identifiers and one VPC security group identifier are required to create an environment. For more information, see Creating the VPC network for a MWAA environment.

", + "Environment$NetworkConfiguration": null + } + }, + "NextToken": { + "base": null, + "refs": { + "ListEnvironmentsInput$NextToken": "

The Next Token when listing MWAA environments.

", + "ListEnvironmentsOutput$NextToken": "

The Next Token when listing MWAA environments.

" + } + }, + "PublishMetricsInput": { + "base": null, + "refs": { + } + }, + "PublishMetricsOutput": { + "base": null, + "refs": { + } + }, + "RelativePath": { + "base": null, + "refs": { + "CreateEnvironmentInput$DagS3Path": "

The relative path to the DAG folder on your Amazon S3 storage bucket. For example, dags. For more information, see Importing DAGs on Amazon MWAA.

", + "CreateEnvironmentInput$PluginsS3Path": "

The relative path to the plugins.zip file on your Amazon S3 storage bucket. For example, plugins.zip. If a relative path is provided in the request, then PluginsS3ObjectVersion is required. For more information, see Importing DAGs on Amazon MWAA.

", + "CreateEnvironmentInput$RequirementsS3Path": "

The relative path to the requirements.txt file on your Amazon S3 storage bucket. For example, requirements.txt. If a relative path is provided in the request, then RequirementsS3ObjectVersion is required. For more information, see Importing DAGs on Amazon MWAA.

", + "Environment$DagS3Path": "

The Dags S3 Path of the Amazon MWAA Environment.

", + "Environment$PluginsS3Path": "

The Plugins.zip S3 Path of the Amazon MWAA Environment.

", + "Environment$RequirementsS3Path": "

The Requirement.txt S3 Path of the Amazon MWAA Environment.

", + "UpdateEnvironmentInput$DagS3Path": "

The Dags folder S3 Path to update of your Amazon MWAA environment.

", + "UpdateEnvironmentInput$PluginsS3Path": "

The Plugins.zip S3 Path to update of your Amazon MWAA environment.

", + "UpdateEnvironmentInput$RequirementsS3Path": "

The Requirements.txt S3 Path to update of your Amazon MWAA environment.

" + } + }, + "ResourceNotFoundException": { + "base": "

ResourceNotFoundException: The resource is not available.

", + "refs": { + } + }, + "S3BucketArn": { + "base": null, + "refs": { + "CreateEnvironmentInput$SourceBucketArn": "

The Amazon Resource Name (ARN) of your Amazon S3 storage bucket. For example, arn:aws:s3:::airflow-mybucketname.

", + "Environment$SourceBucketArn": "

The Source S3 Bucket ARN of the Amazon MWAA Environment.

", + "UpdateEnvironmentInput$SourceBucketArn": "

The S3 Source Bucket ARN to update of your Amazon MWAA environment.

" + } + }, + "S3ObjectVersion": { + "base": null, + "refs": { + "CreateEnvironmentInput$PluginsS3ObjectVersion": "

The plugins.zip file version you want to use.

", + "CreateEnvironmentInput$RequirementsS3ObjectVersion": "

The requirements.txt file version you want to use.

", + "Environment$PluginsS3ObjectVersion": "

The Plugins.zip S3 Object Version of the Amazon MWAA Environment.

", + "Environment$RequirementsS3ObjectVersion": "

The Requirements.txt file S3 Object Version of the Amazon MWAA Environment.

", + "UpdateEnvironmentInput$PluginsS3ObjectVersion": "

The Plugins.zip S3 Object Version to update of your Amazon MWAA environment.

", + "UpdateEnvironmentInput$RequirementsS3ObjectVersion": "

The Requirements.txt S3 Object Version to update of your Amazon MWAA environment.

" + } + }, + "SecurityGroupId": { + "base": null, + "refs": { + "SecurityGroupList$member": null + } + }, + "SecurityGroupList": { + "base": null, + "refs": { + "NetworkConfiguration$SecurityGroupIds": "

A JSON list of 1 or more security group IDs by name, in the same VPC as the subnets.

", + "UpdateNetworkConfigurationInput$SecurityGroupIds": "

Provide a JSON list of 1 or more security group IDs by name, in the same VPC as the subnets.

" + } + }, + "StatisticSet": { + "base": "

Internal only API.

", + "refs": { + "MetricDatum$StatisticValues": "

Internal only API.

" + } + }, + "String": { + "base": null, + "refs": { + "AccessDeniedException$Message": null, + "Dimension$Name": "

Internal only API.

", + "Dimension$Value": "

Internal only API.

", + "InternalServerException$message": null, + "MetricDatum$MetricName": "

Internal only API.

", + "ResourceNotFoundException$message": null, + "ValidationException$message": null + } + }, + "SubnetId": { + "base": null, + "refs": { + "SubnetList$member": null + } + }, + "SubnetList": { + "base": null, + "refs": { + "NetworkConfiguration$SubnetIds": "

Provide a JSON list of 2 subnet IDs by name. These must be private subnets, in the same VPC, in two different availability zones.

" + } + }, + "SyntheticCreateCliTokenResponseToken": { + "base": null, + "refs": { + "CreateCliTokenResponse$CliToken": "

Create an Airflow CLI login token response for the provided JWT token.

" + } + }, + "SyntheticCreateEnvironmentInputAirflowConfigurationOptions": { + "base": null, + "refs": { + "CreateEnvironmentInput$AirflowConfigurationOptions": "

The Apache Airflow configuration setting you want to override in your environment. For more information, see Environment configuration.

" + } + }, + "SyntheticCreateWebLoginTokenResponseToken": { + "base": null, + "refs": { + "CreateWebLoginTokenResponse$WebToken": "

Create an Airflow Web UI login token response for the provided JWT token.

" + } + }, + "SyntheticUpdateEnvironmentInputAirflowConfigurationOptions": { + "base": null, + "refs": { + "UpdateEnvironmentInput$AirflowConfigurationOptions": "

The Airflow Configuration Options to update of your Amazon MWAA environment.

" + } + }, + "TagKey": { + "base": null, + "refs": { + "TagKeyList$member": null, + "TagMap$key": null + } + }, + "TagKeyList": { + "base": null, + "refs": { + "UntagResourceInput$tagKeys": "

The tag resource key of the MWAA environments.

" + } + }, + "TagMap": { + "base": null, + "refs": { + "CreateEnvironmentInput$Tags": "

The metadata tags you want to attach to your environment. For more information, see Tagging AWS resources.

", + "Environment$Tags": "

The Tags of the Amazon MWAA Environment.

", + "ListTagsForResourceOutput$Tags": "

The tags of the MWAA environments.

", + "TagResourceInput$Tags": "

The tag resource tag of the MWAA environments.

" + } + }, + "TagResourceInput": { + "base": null, + "refs": { + } + }, + "TagResourceOutput": { + "base": null, + "refs": { + } + }, + "TagValue": { + "base": null, + "refs": { + "TagMap$value": null + } + }, + "Timestamp": { + "base": null, + "refs": { + "MetricDatum$Timestamp": "

Internal only API.

" + } + }, + "Unit": { + "base": "

Unit

", + "refs": { + "MetricDatum$Unit": null + } + }, + "UntagResourceInput": { + "base": null, + "refs": { + } + }, + "UntagResourceOutput": { + "base": null, + "refs": { + } + }, + "UpdateCreatedAt": { + "base": null, + "refs": { + "LastUpdate$CreatedAt": "

Time that last update occurred.

" + } + }, + "UpdateEnvironmentInput": { + "base": null, + "refs": { + } + }, + "UpdateEnvironmentOutput": { + "base": null, + "refs": { + } + }, + "UpdateError": { + "base": "

Error information of update, if applicable.

", + "refs": { + "LastUpdate$Error": "

Error string of last update, if applicable.

" + } + }, + "UpdateNetworkConfigurationInput": { + "base": "

Provide the security group and subnet IDs for the workers and scheduler.

", + "refs": { + "UpdateEnvironmentInput$NetworkConfiguration": "

The Network Configuration to update of your Amazon MWAA environment.

" + } + }, + "UpdateStatus": { + "base": null, + "refs": { + "LastUpdate$Status": "

Status of last update of SUCCESS, FAILED, CREATING, DELETING.

" + } + }, + "ValidationException": { + "base": "

ValidationException: The provided input is not valid.

", + "refs": { + } + }, + "WebserverAccessMode": { + "base": null, + "refs": { + "CreateEnvironmentInput$WebserverAccessMode": "

The networking access of your Apache Airflow web server. A public network allows your Airflow UI to be accessed over the Internet by users granted access in your IAM policy. A private network limits access of your Airflow UI to users within your VPC. For more information, see Creating the VPC network for a MWAA environment.

", + "Environment$WebserverAccessMode": "

The Webserver Access Mode of the Amazon MWAA Environment (public or private only).

", + "UpdateEnvironmentInput$WebserverAccessMode": "

The Webserver Access Mode to update of your Amazon MWAA environment.

" + } + }, + "WebserverUrl": { + "base": null, + "refs": { + "Environment$WebserverUrl": "

The Webserver URL of the Amazon MWAA Environment.

" + } + }, + "WeeklyMaintenanceWindowStart": { + "base": null, + "refs": { + "CreateEnvironmentInput$WeeklyMaintenanceWindowStart": "

The day and time you want MWAA to start weekly maintenance updates on your environment.

", + "Environment$WeeklyMaintenanceWindowStart": "

The Weekly Maintenance Window Start of the Amazon MWAA Environment.

", + "UpdateEnvironmentInput$WeeklyMaintenanceWindowStart": "

The Weekly Maintenance Window Start to update of your Amazon MWAA environment.

" + } + } + } +} diff --git a/models/apis/mwaa/2020-07-01/examples-1.json b/models/apis/mwaa/2020-07-01/examples-1.json new file mode 100644 index 00000000000..0ea7e3b0bbe --- /dev/null +++ b/models/apis/mwaa/2020-07-01/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/models/apis/mwaa/2020-07-01/paginators-1.json b/models/apis/mwaa/2020-07-01/paginators-1.json new file mode 100644 index 00000000000..5e218e4616b --- /dev/null +++ b/models/apis/mwaa/2020-07-01/paginators-1.json @@ -0,0 +1,10 @@ +{ + "pagination": { + "ListEnvironments": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Environments" + } + } +} diff --git a/models/apis/quicksight/2018-04-01/api-2.json b/models/apis/quicksight/2018-04-01/api-2.json index da087382ae0..13c03cc0492 100644 --- a/models/apis/quicksight/2018-04-01/api-2.json +++ b/models/apis/quicksight/2018-04-01/api-2.json @@ -926,6 +926,7 @@ {"shape":"IdentityTypeNotSupportedException"}, {"shape":"SessionLifetimeInMinutesInvalidException"}, {"shape":"UnsupportedUserEditionException"}, + {"shape":"UnsupportedPricingPlanException"}, {"shape":"InternalFailureException"} ] }, @@ -1800,6 +1801,12 @@ "AvailabilityStatus":{"shape":"DashboardBehavior"} } }, + "AdditionalDashboardIdList":{ + "type":"list", + "member":{"shape":"RestrictiveResourceId"}, + "max":20, + "min":1 + }, "AliasName":{ "type":"string", "max":2048, @@ -4431,6 +4438,14 @@ "ENTERPRISE" ] }, + "EmbeddingIdentityType":{ + "type":"string", + "enum":[ + "IAM", + "QUICKSIGHT", + "ANONYMOUS" + ] + }, "EmbeddingUrl":{ "type":"string", "sensitive":true @@ -4542,7 +4557,7 @@ "locationName":"DashboardId" }, "IdentityType":{ - "shape":"IdentityType", + "shape":"EmbeddingIdentityType", "location":"querystring", "locationName":"creds-type" }, @@ -4570,6 +4585,16 @@ "shape":"Arn", "location":"querystring", "locationName":"user-arn" + }, + "Namespace":{ + "shape":"Namespace", + 
"location":"querystring", + "locationName":"namespace" + }, + "AdditionalDashboardIds":{ + "shape":"AdditionalDashboardIdList", + "location":"querystring", + "locationName":"additional-dashboard-ids" } } }, @@ -6922,6 +6947,15 @@ "MeasureForeground":{"shape":"HexColor"} } }, + "UnsupportedPricingPlanException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":403}, + "exception":true + }, "UnsupportedUserEditionException":{ "type":"structure", "members":{ diff --git a/models/apis/quicksight/2018-04-01/docs-2.json b/models/apis/quicksight/2018-04-01/docs-2.json index 7435add014b..7a17b1212c9 100644 --- a/models/apis/quicksight/2018-04-01/docs-2.json +++ b/models/apis/quicksight/2018-04-01/docs-2.json @@ -148,6 +148,12 @@ "DashboardPublishOptions$AdHocFilteringOption": "

Ad hoc (one-time) filtering option.

" } }, + "AdditionalDashboardIdList": { + "base": null, + "refs": { + "GetDashboardEmbedUrlRequest$AdditionalDashboardIds": "

A list of one or more dashboard ids that you want to add to a session that includes anonymous authorizations. IdentityType must be set to ANONYMOUS for this to work, because other identity types authenticate as QuickSight users. For example, if you set \"--dashboard-id dash_id1 --dashboard-id dash_id2 dash_id3 identity-type ANONYMOUS\", the session can access all three dashboards.

" + } + }, "AliasName": { "base": null, "refs": { @@ -509,7 +515,7 @@ "Boolean": { "base": null, "refs": { - "DataSetSummary$ColumnLevelPermissionRulesApplied": "

Indicates if the dataset has column level permission configured.

", + "DataSetSummary$ColumnLevelPermissionRulesApplied": "

Indicates if the dataset has column level permission configured.

", "GetDashboardEmbedUrlRequest$UndoRedoDisabled": "

Remove the undo/redo button on the embedded dashboard. The default is FALSE, which enables the undo/redo button.

", "GetDashboardEmbedUrlRequest$ResetDisabled": "

Remove the reset button on the embedded dashboard. The default is FALSE, which enables the reset button.

", "GetDashboardEmbedUrlRequest$StatePersistenceEnabled": "

Adds persistence of state for the user session in an embedded dashboard. Persistence applies to the sheet and the parameter settings. These are control settings that the dashboard subscriber (QuickSight reader) chooses while viewing the dashboard. If this is set to TRUE, the settings are the same when the subscriber reopens the same dashboard URL. The state is stored in QuickSight, not in a browser cookie. If this is set to FALSE, the state of the user session is not persisted. The default is FALSE.

", @@ -1568,6 +1574,12 @@ "AccountSettings$Edition": "

The edition of QuickSight that you're currently subscribed to: Enterprise edition or Standard edition.

" } }, + "EmbeddingIdentityType": { + "base": null, + "refs": { + "GetDashboardEmbedUrlRequest$IdentityType": "

The authentication method that the user uses to sign in.

" + } + }, "EmbeddingUrl": { "base": null, "refs": { @@ -1647,12 +1659,12 @@ } }, "GetDashboardEmbedUrlRequest": { - "base": null, + "base": "

Parameter input for the GetDashboardEmbedUrl operation.

", "refs": { } }, "GetDashboardEmbedUrlResponse": { - "base": null, + "base": "

Output returned from the GetDashboardEmbedUrl operation.

", "refs": { } }, @@ -1836,7 +1848,6 @@ "IdentityType": { "base": null, "refs": { - "GetDashboardEmbedUrlRequest$IdentityType": "

The authentication method that the user uses to sign in.

", "RegisterUserRequest$IdentityType": "

Amazon QuickSight supports several ways of managing the identity of users. This parameter accepts two values:

  • IAM: A user whose identity maps to an existing IAM user or role.

  • QUICKSIGHT: A user whose identity is owned and managed internally by Amazon QuickSight.

", "User$IdentityType": "

The type of identity authentication used by the user.

" } @@ -2297,6 +2308,7 @@ "DescribeIAMPolicyAssignmentRequest$Namespace": "

The namespace that contains the assignment.

", "DescribeNamespaceRequest$Namespace": "

The namespace that you want to describe.

", "DescribeUserRequest$Namespace": "

The namespace. Currently, you should set this to default.

", + "GetDashboardEmbedUrlRequest$Namespace": "

The QuickSight namespace that contains the dashboard IDs in this request. If you're not using a custom namespace, set this to \"default\".

", "ListGroupMembershipsRequest$Namespace": "

The namespace. Currently, you should set this to default.

", "ListGroupsRequest$Namespace": "

The namespace. Currently, you should set this to default.

", "ListIAMPolicyAssignmentsForUserRequest$Namespace": "

The namespace of the assignment.

", @@ -2687,6 +2699,7 @@ "RestrictiveResourceId": { "base": null, "refs": { + "AdditionalDashboardIdList$member": null, "Analysis$AnalysisId": "

The ID of the analysis.

", "AnalysisSummary$AnalysisId": "

The ID of the analysis. This ID displays in the URL.

", "CreateAnalysisRequest$AnalysisId": "

The ID for the analysis that you're creating. This ID displays in the URL of the analysis.

", @@ -3207,6 +3220,8 @@ "TagResourceResponse$RequestId": "

The AWS request ID for this operation.

", "ThrottlingException$Message": null, "ThrottlingException$RequestId": "

The AWS request ID for this request.

", + "UnsupportedPricingPlanException$Message": null, + "UnsupportedPricingPlanException$RequestId": "

The AWS request ID for this request.

", "UnsupportedUserEditionException$Message": null, "UnsupportedUserEditionException$RequestId": "

The AWS request ID for this request.

", "UntagResourceResponse$RequestId": "

The AWS request ID for this operation.

", @@ -3603,6 +3618,11 @@ "ThemeConfiguration$UIColorPalette": "

Color properties that apply to the UI and to charts, excluding the colors that apply to data.

" } }, + "UnsupportedPricingPlanException": { + "base": "

This error indicates that you are calling an embedding operation in Amazon QuickSight without the required pricing plan on your AWS account. Before you can use anonymous embedding, a QuickSight administrator needs to add capacity pricing to QuickSight. You can do this on the Manage QuickSight page.

After capacity pricing is added, you can enable anonymous embedding by using the GetDashboardEmbedUrl API operation with the --identity-type ANONYMOUS option.

", + "refs": { + } + }, "UnsupportedUserEditionException": { "base": "

This error indicates that you are calling an operation on an Amazon QuickSight subscription where the edition doesn't include support for that operation. Amazon QuickSight currently has Standard Edition and Enterprise Edition. Not every operation and capability is available in every edition.

", "refs": { diff --git a/models/apis/states/2016-11-23/api-2.json b/models/apis/states/2016-11-23/api-2.json index a60dbfa4ad9..5aa1176395d 100644 --- a/models/apis/states/2016-11-23/api-2.json +++ b/models/apis/states/2016-11-23/api-2.json @@ -268,6 +268,24 @@ ], "idempotent":true }, + "StartSyncExecution":{ + "name":"StartSyncExecution", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartSyncExecutionInput"}, + "output":{"shape":"StartSyncExecutionOutput"}, + "errors":[ + {"shape":"InvalidArn"}, + {"shape":"InvalidExecutionInput"}, + {"shape":"InvalidName"}, + {"shape":"StateMachineDoesNotExist"}, + {"shape":"StateMachineDeleting"}, + {"shape":"StateMachineTypeNotSupported"} + ], + "endpoint":{"hostPrefix":"sync-"} + }, "StopExecution":{ "name":"StopExecution", "http":{ @@ -423,10 +441,25 @@ "max":256, "min":1 }, + "BilledDuration":{ + "type":"long", + "min":0 + }, + "BilledMemoryUsed":{ + "type":"long", + "min":0 + }, + "BillingDetails":{ + "type":"structure", + "members":{ + "billedMemoryUsedInMB":{"shape":"BilledMemoryUsed"}, + "billedDurationInMilliseconds":{"shape":"BilledDuration"} + } + }, "CloudWatchEventsExecutionDataDetails":{ "type":"structure", "members":{ - "included":{"shape":"included"} + "included":{"shape":"includedDetails"} } }, "CloudWatchLogsLogGroup":{ @@ -1199,6 +1232,41 @@ "startDate":{"shape":"Timestamp"} } }, + "StartSyncExecutionInput":{ + "type":"structure", + "required":["stateMachineArn"], + "members":{ + "stateMachineArn":{"shape":"Arn"}, + "name":{"shape":"Name"}, + "input":{"shape":"SensitiveData"}, + "traceHeader":{"shape":"TraceHeader"} + } + }, + "StartSyncExecutionOutput":{ + "type":"structure", + "required":[ + "executionArn", + "startDate", + "stopDate", + "status" + ], + "members":{ + "executionArn":{"shape":"Arn"}, + "stateMachineArn":{"shape":"Arn"}, + "name":{"shape":"Name"}, + "startDate":{"shape":"Timestamp"}, + "stopDate":{"shape":"Timestamp"}, + "status":{"shape":"SyncExecutionStatus"}, + 
"error":{"shape":"SensitiveError"}, + "cause":{"shape":"SensitiveCause"}, + "input":{"shape":"SensitiveData"}, + "inputDetails":{"shape":"CloudWatchEventsExecutionDataDetails"}, + "output":{"shape":"SensitiveData"}, + "outputDetails":{"shape":"CloudWatchEventsExecutionDataDetails"}, + "traceHeader":{"shape":"TraceHeader"}, + "billingDetails":{"shape":"BillingDetails"} + } + }, "StateEnteredEventDetails":{ "type":"structure", "required":["name"], @@ -1301,6 +1369,14 @@ "stopDate":{"shape":"Timestamp"} } }, + "SyncExecutionStatus":{ + "type":"string", + "enum":[ + "SUCCEEDED", + "FAILED", + "TIMED_OUT" + ] + }, "Tag":{ "type":"structure", "members":{ @@ -1533,7 +1609,7 @@ "updateDate":{"shape":"Timestamp"} } }, - "included":{"type":"boolean"}, + "includedDetails":{"type":"boolean"}, "truncated":{"type":"boolean"} } } diff --git a/models/apis/states/2016-11-23/docs-2.json b/models/apis/states/2016-11-23/docs-2.json index 685016ee658..0d978bfacc8 100644 --- a/models/apis/states/2016-11-23/docs-2.json +++ b/models/apis/states/2016-11-23/docs-2.json @@ -20,6 +20,7 @@ "SendTaskHeartbeat": "

Used by activity workers and task states using the callback pattern to report to Step Functions that the task represented by the specified taskToken is still making progress. This action resets the Heartbeat clock. The Heartbeat threshold is specified in the state machine's Amazon States Language definition (HeartbeatSeconds). This action does not in itself create an event in the execution history. However, if the task times out, the execution history contains an ActivityTimedOut entry for activities, or a TaskTimedOut entry for tasks using the job run or callback pattern.

The Timeout of a task, defined in the state machine's Amazon States Language definition, is its maximum allowed duration, regardless of the number of SendTaskHeartbeat requests received. Use HeartbeatSeconds to configure the timeout interval for heartbeats.

", "SendTaskSuccess": "

Used by activity workers and task states using the callback pattern to report that the task identified by the taskToken completed successfully.

", "StartExecution": "

Starts a state machine execution.

StartExecution is idempotent. If StartExecution is called with the same name and input as a running execution, the call will succeed and return the same response as the original request. If the execution is closed or if the input is different, it will return a 400 ExecutionAlreadyExists error. Names can be reused after 90 days.

", + "StartSyncExecution": "

Starts a Synchronous Express state machine execution.

", "StopExecution": "

Stops an execution.

This API action is not supported by EXPRESS state machines.

", "TagResource": "

Add a tag to a Step Functions resource.

An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide, and Controlling Access Using IAM Tags.

Tags may only contain Unicode letters, digits, white space, or these symbols: _ . : / = + - @.

", "UntagResource": "

Remove a tag from a Step Functions resource

", @@ -103,7 +104,7 @@ "DescribeActivityInput$activityArn": "

The Amazon Resource Name (ARN) of the activity to describe.

", "DescribeActivityOutput$activityArn": "

The Amazon Resource Name (ARN) that identifies the activity.

", "DescribeExecutionInput$executionArn": "

The Amazon Resource Name (ARN) of the execution to describe.

", - "DescribeExecutionOutput$executionArn": "

The Amazon Resource Name (ARN) that id entifies the execution.

", + "DescribeExecutionOutput$executionArn": "

The Amazon Resource Name (ARN) that identifies the execution.

", "DescribeExecutionOutput$stateMachineArn": "

The Amazon Resource Name (ARN) of the executed state machine.

", "DescribeStateMachineForExecutionInput$executionArn": "

The Amazon Resource Name (ARN) of the execution you want state machine information for.

", "DescribeStateMachineForExecutionOutput$stateMachineArn": "

The Amazon Resource Name (ARN) of the state machine associated with the execution.

", @@ -111,7 +112,7 @@ "DescribeStateMachineInput$stateMachineArn": "

The Amazon Resource Name (ARN) of the state machine to describe.

", "DescribeStateMachineOutput$stateMachineArn": "

The Amazon Resource Name (ARN) that identifies the state machine.

", "DescribeStateMachineOutput$roleArn": "

The Amazon Resource Name (ARN) of the IAM role used when creating this state machine. (The IAM role maintains security by granting Step Functions access to AWS resources.)

", - "ExecutionListItem$executionArn": "

The Amazon Resource Name (ARN) that id entifies the execution.

", + "ExecutionListItem$executionArn": "

The Amazon Resource Name (ARN) that identifies the execution.

", "ExecutionListItem$stateMachineArn": "

The Amazon Resource Name (ARN) of the executed state machine.

", "ExecutionStartedEventDetails$roleArn": "

The Amazon Resource Name (ARN) of the IAM role used for executing AWS Lambda tasks.

", "GetActivityTaskInput$activityArn": "

The Amazon Resource Name (ARN) of the activity to retrieve tasks from (assigned when you create the task using CreateActivity.)

", @@ -121,7 +122,10 @@ "ListTagsForResourceInput$resourceArn": "

The Amazon Resource Name (ARN) for the Step Functions state machine or activity.

", "ResourceNotFound$resourceName": null, "StartExecutionInput$stateMachineArn": "

The Amazon Resource Name (ARN) of the state machine to execute.

", - "StartExecutionOutput$executionArn": "

The Amazon Resource Name (ARN) that id entifies the execution.

", + "StartExecutionOutput$executionArn": "

The Amazon Resource Name (ARN) that identifies the execution.

", + "StartSyncExecutionInput$stateMachineArn": "

The Amazon Resource Name (ARN) of the state machine to execute.

", + "StartSyncExecutionOutput$executionArn": "

The Amazon Resource Name (ARN) that identifies the execution.

", + "StartSyncExecutionOutput$stateMachineArn": "

The Amazon Resource Name (ARN) that identifies the state machine.

", "StateMachineListItem$stateMachineArn": "

The Amazon Resource Name (ARN) that identifies the state machine.

", "StopExecutionInput$executionArn": "

The Amazon Resource Name (ARN) of the execution to stop.

", "TagResourceInput$resourceArn": "

The Amazon Resource Name (ARN) for the Step Functions state machine or activity.

", @@ -131,11 +135,31 @@ "UpdateStateMachineInput$roleArn": "

The Amazon Resource Name (ARN) of the IAM role of the state machine.

" } }, + "BilledDuration": { + "base": null, + "refs": { + "BillingDetails$billedDurationInMilliseconds": "

Billed duration of your workflow, in milliseconds.

" + } + }, + "BilledMemoryUsed": { + "base": null, + "refs": { + "BillingDetails$billedMemoryUsedInMB": "

Billed memory consumption of your workflow, in MB.

" + } + }, + "BillingDetails": { + "base": "

An object that describes workflow billing details.

", + "refs": { + "StartSyncExecutionOutput$billingDetails": "

An object that describes workflow billing details, including billed duration and memory use.

" + } + }, "CloudWatchEventsExecutionDataDetails": { "base": "

Provides details about execution input or output.

", "refs": { "DescribeExecutionOutput$inputDetails": null, - "DescribeExecutionOutput$outputDetails": null + "DescribeExecutionOutput$outputDetails": null, + "StartSyncExecutionOutput$inputDetails": null, + "StartSyncExecutionOutput$outputDetails": null } }, "CloudWatchLogsLogGroup": { @@ -601,6 +625,8 @@ "GetActivityTaskInput$workerName": "

You can provide an arbitrary name in order to identify the worker that the task is assigned to. This name is used when it is logged in the execution history.

", "MapIterationEventDetails$name": "

The name of the iteration’s parent Map state.

", "StartExecutionInput$name": "

The name of the execution. This name must be unique for your AWS account, region, and state machine for 90 days. For more information, see Limits Related to State Machine Executions in the AWS Step Functions Developer Guide.

A name must not contain:

  • white space

  • brackets < > { } [ ]

  • wildcard characters ? *

  • special characters \" # % \\ ^ | ~ ` $ & , ; : /

  • control characters (U+0000-001F, U+007F-009F)

To enable logging with CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and _.

", + "StartSyncExecutionInput$name": "

The name of the execution.

", + "StartSyncExecutionOutput$name": "

The name of the execution.

", "StateEnteredEventDetails$name": "

The name of the state.

", "StateExitedEventDetails$name": "

The name of the state.

A name must not contain:

  • white space

  • brackets < > { } [ ]

  • wildcard characters ? *

  • special characters \" # % \\ ^ | ~ ` $ & , ; : /

  • control characters (U+0000-001F, U+007F-009F)

To enable logging with CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and _.

", "StateMachineListItem$name": "

The name of the state machine.

A name must not contain:

  • white space

  • brackets < > { } [ ]

  • wildcard characters ? *

  • special characters \" # % \\ ^ | ~ ` $ & , ; : /

  • control characters (U+0000-001F, U+007F-009F)

To enable logging with CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and _.

", @@ -698,6 +724,7 @@ "LambdaFunctionStartFailedEventDetails$cause": "

A more detailed explanation of the cause of the failure.

", "LambdaFunctionTimedOutEventDetails$cause": "

A more detailed explanation of the cause of the timeout.

", "SendTaskFailureInput$cause": "

A more detailed explanation of the cause of the failure.

", + "StartSyncExecutionOutput$cause": "

A more detailed explanation of the cause of the failure.

", "StopExecutionInput$cause": "

A more detailed explanation of the cause of the failure.

", "TaskFailedEventDetails$cause": "

A more detailed explanation of the cause of the failure.

", "TaskStartFailedEventDetails$cause": "

A more detailed explanation of the cause of the failure.

", @@ -718,6 +745,9 @@ "LambdaFunctionSucceededEventDetails$output": "

The JSON data output by the lambda function. Length constraints apply to the payload size, and are expressed as bytes in UTF-8 encoding.

", "SendTaskSuccessInput$output": "

The JSON output of the task. Length constraints apply to the payload size, and are expressed as bytes in UTF-8 encoding.

", "StartExecutionInput$input": "

The string that contains the JSON input data for the execution, for example:

\"input\": \"{\\\"first_name\\\" : \\\"test\\\"}\"

If you don't include any JSON input data, you still must include the two braces, for example: \"input\": \"{}\"

Length constraints apply to the payload size, and are expressed as bytes in UTF-8 encoding.

", + "StartSyncExecutionInput$input": "

The string that contains the JSON input data for the execution, for example:

\"input\": \"{\\\"first_name\\\" : \\\"test\\\"}\"

If you don't include any JSON input data, you still must include the two braces, for example: \"input\": \"{}\"

Length constraints apply to the payload size, and are expressed as bytes in UTF-8 encoding.

", + "StartSyncExecutionOutput$input": "

The string that contains the JSON input data of the execution. Length constraints apply to the payload size, and are expressed as bytes in UTF-8 encoding.

", + "StartSyncExecutionOutput$output": "

The JSON output data of the execution. Length constraints apply to the payload size, and are expressed as bytes in UTF-8 encoding.

This field is set only if the execution succeeds. If the execution fails, this field is null.

", "StateEnteredEventDetails$input": "

The string that contains the JSON input data for the state. Length constraints apply to the payload size, and are expressed as bytes in UTF-8 encoding.

", "StateExitedEventDetails$output": "

The JSON output data of the state. Length constraints apply to the payload size, and are expressed as bytes in UTF-8 encoding.

", "TaskSubmittedEventDetails$output": "

The response from a resource when a task has started. Length constraints apply to the payload size, and are expressed as bytes in UTF-8 encoding.

", @@ -744,6 +774,7 @@ "LambdaFunctionStartFailedEventDetails$error": "

The error code of the failure.

", "LambdaFunctionTimedOutEventDetails$error": "

The error code of the failure.

", "SendTaskFailureInput$error": "

The error code of the failure.

", + "StartSyncExecutionOutput$error": "

The error code of the failure.

", "StopExecutionInput$error": "

The error code of the failure.

", "TaskFailedEventDetails$error": "

The error code of the failure.

", "TaskStartFailedEventDetails$error": "

The error code of the failure.

", @@ -761,6 +792,16 @@ "refs": { } }, + "StartSyncExecutionInput": { + "base": null, + "refs": { + } + }, + "StartSyncExecutionOutput": { + "base": null, + "refs": { + } + }, "StateEnteredEventDetails": { "base": "

Contains details about a state entered during an execution.

", "refs": { @@ -834,6 +875,12 @@ "refs": { } }, + "SyncExecutionStatus": { + "base": null, + "refs": { + "StartSyncExecutionOutput$status": "

The current status of the execution.

" + } + }, "Tag": { "base": "

Tags are key-value pairs that can be associated with Step Functions state machines and activities.

An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide, and Controlling Access Using IAM Tags.

Tags may only contain Unicode letters, digits, white space, or these symbols: _ . : / = + - @.

", "refs": { @@ -970,6 +1017,8 @@ "ExecutionListItem$stopDate": "

If the execution already ended, the date the execution stopped.

", "HistoryEvent$timestamp": "

The date and time the event occurred.

", "StartExecutionOutput$startDate": "

The date the execution is started.

", + "StartSyncExecutionOutput$startDate": "

The date the execution is started.

", + "StartSyncExecutionOutput$stopDate": "

If the execution has already ended, the date the execution stopped.

", "StateMachineListItem$creationDate": "

The date the state machine is created.

", "StopExecutionOutput$stopDate": "

The date the execution is stopped.

", "UpdateStateMachineOutput$updateDate": "

The date and time the state machine was updated.

" @@ -983,8 +1032,10 @@ "TraceHeader": { "base": null, "refs": { - "DescribeExecutionOutput$traceHeader": "

The AWS X-Ray trace header which was passed to the execution.

", - "StartExecutionInput$traceHeader": "

Passes the AWS X-Ray trace header. The trace header can also be passed in the request payload.

" + "DescribeExecutionOutput$traceHeader": "

The AWS X-Ray trace header that was passed to the execution.

", + "StartExecutionInput$traceHeader": "

Passes the AWS X-Ray trace header. The trace header can also be passed in the request payload.

", + "StartSyncExecutionInput$traceHeader": "

Passes the AWS X-Ray trace header. The trace header can also be passed in the request payload.

", + "StartSyncExecutionOutput$traceHeader": "

The AWS X-Ray trace header that was passed to the execution.

" } }, "TracingConfiguration": { @@ -1023,7 +1074,7 @@ "refs": { } }, - "included": { + "includedDetails": { "base": null, "refs": { "CloudWatchEventsExecutionDataDetails$included": "

Indicates whether input or output was included in the response. Always true for API calls.

" diff --git a/models/apis/timestream-write/2018-11-01/api-2.json b/models/apis/timestream-write/2018-11-01/api-2.json index 063fd9c08a3..1e6ee45288b 100644 --- a/models/apis/timestream-write/2018-11-01/api-2.json +++ b/models/apis/timestream-write/2018-11-01/api-2.json @@ -183,6 +183,7 @@ "output":{"shape":"ListTagsForResourceResponse"}, "errors":[ {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, {"shape":"ValidationException"}, {"shape":"InvalidEndpointException"} ], @@ -199,6 +200,7 @@ "errors":[ {"shape":"ResourceNotFoundException"}, {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, {"shape":"ValidationException"}, {"shape":"InvalidEndpointException"} ], @@ -215,6 +217,7 @@ "errors":[ {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InvalidEndpointException"} ], @@ -537,10 +540,15 @@ "MeasureValue":{"shape":"StringValue2048"}, "MeasureValueType":{"shape":"MeasureValueType"}, "Time":{"shape":"StringValue256"}, - "TimeUnit":{"shape":"TimeUnit"} + "TimeUnit":{"shape":"TimeUnit"}, + "Version":{ + "shape":"RecordVersion", + "box":true + } } }, "RecordIndex":{"type":"integer"}, + "RecordVersion":{"type":"long"}, "Records":{ "type":"list", "member":{"shape":"Record"}, @@ -551,7 +559,11 @@ "type":"structure", "members":{ "RecordIndex":{"shape":"RecordIndex"}, - "Reason":{"shape":"ErrorMessage"} + "Reason":{"shape":"ErrorMessage"}, + "ExistingVersion":{ + "shape":"RecordVersion", + "box":true + } } }, "RejectedRecords":{ diff --git a/models/apis/timestream-write/2018-11-01/docs-2.json b/models/apis/timestream-write/2018-11-01/docs-2.json index d602defe911..bb18eaf6e9b 100644 --- a/models/apis/timestream-write/2018-11-01/docs-2.json +++ b/models/apis/timestream-write/2018-11-01/docs-2.json @@ -4,8 +4,8 @@ "operations": { "CreateDatabase": "

Creates a new Timestream database. If the KMS key is not specified, the database will be encrypted with a Timestream managed KMS key located in your account. Refer to AWS managed KMS keys for more info. Service quotas apply. For more information, see Access Management in the Timestream Developer Guide.

", "CreateTable": "

The CreateTable operation adds a new table to an existing database in your account. In an AWS account, table names must be at least unique within each Region if they are in the same database. You may have identical table names in the same Region if the tables are in seperate databases. While creating the table, you must specify the table name, database name, and the retention properties. Service quotas apply. For more information, see Access Management in the Timestream Developer Guide.

", - "DeleteDatabase": "

Deletes a given Timestream database. This is an irreversible operation. After a database is deleted, the time series data from its tables cannot be recovered.

All tables in the database must be deleted first, or a ValidationException error will be thrown.

", - "DeleteTable": "

Deletes a given Timestream table. This is an irreversible operation. After a Timestream database table is deleted, the time series data stored in the table cannot be recovered.

", + "DeleteDatabase": "

Deletes a given Timestream database. This is an irreversible operation. After a database is deleted, the time series data from its tables cannot be recovered.

All tables in the database must be deleted first, or a ValidationException error will be thrown.

Due to the nature of distributed retries, the operation can return either success or a ResourceNotFoundException. Clients should consider them equivalent.

", + "DeleteTable": "

Deletes a given Timestream table. This is an irreversible operation. After a Timestream database table is deleted, the time series data stored in the table cannot be recovered.

Due to the nature of distributed retries, the operation can return either success or a ResourceNotFoundException. Clients should consider them equivalent.

", "DescribeDatabase": "

Returns information about the database, including the database name, time that the database was created, and the total number of tables found within the database. Service quotas apply. For more information, see Access Management in the Timestream Developer Guide.

", "DescribeEndpoints": "

DescribeEndpoints returns a list of available endpoints to make Timestream API calls against. This API is available through both Write and Query.

Because Timestream’s SDKs are designed to transparently work with the service’s architecture, including the management and mapping of the service endpoints, it is not recommended that you use this API unless:

  • Your application uses a programming language that does not yet have SDK support

  • You require better control over the client-side implementation

For detailed information on how to use DescribeEndpoints, see The Endpoint Discovery Pattern and REST APIs.

", "DescribeTable": "

Returns information about the table, including the table name, database name, retention duration of the memory store and the magnetic store. Service quotas apply. For more information, see Access Management in the Timestream Developer Guide.

", @@ -158,7 +158,7 @@ "ConflictException$Message": null, "InternalServerException$Message": null, "InvalidEndpointException$Message": null, - "RejectedRecord$Reason": "

The reason why a record was not successfully inserted into Timestream. Possible causes of failure include:

  • Records with duplicate data where there are multiple records with the same dimensions, timestamps, and measure names but different measure values.

  • Records with timestamps that lie outside the retention duration of the memory store

  • Records with dimensions or measures that exceed the Timestream defined limits.

For more information, see Access Management in the Timestream Developer Guide.

", + "RejectedRecord$Reason": "

The reason why a record was not successfully inserted into Timestream. Possible causes of failure include:

  • Records with duplicate data where there are multiple records with the same dimensions, timestamps, and measure names but different measure values.

  • Records with timestamps that lie outside the retention duration of the memory store

    When the retention window is updated, you will receive a RejectedRecords exception if you immediately try to ingest data within the new window. To avoid a RejectedRecords exception, wait until the duration of the new window to ingest new data. For further information, see Best Practices for Configuring Timestream and the explanation of how storage works in Timestream.

  • Records with dimensions or measures that exceed the Timestream defined limits.

For more information, see Access Management in the Timestream Developer Guide.

", "RejectedRecordsException$Message": null, "ResourceNotFoundException$Message": null, "ServiceQuotaExceededException$Message": null, @@ -251,6 +251,13 @@ "RejectedRecord$RecordIndex": "

The index of the record in the input request for WriteRecords. Indexes begin with 0.

" } }, + "RecordVersion": { + "base": null, + "refs": { + "Record$Version": "

64-bit attribute used for record updates. Write requests for duplicate data with a higher version number will update the existing measure value and version. In cases where the measure value is the same, Version will still be updated. Default value is 1.

", + "RejectedRecord$ExistingVersion": "

The existing version of the record. This value is populated in scenarios where an identical record exists with a higher version than the version in the write request.

" + } + }, "Records": { "base": null, "refs": { @@ -340,9 +347,9 @@ "StringValue256": { "base": null, "refs": { - "Dimension$Name": "

Dimension represents the meta data attributes of the time series. For example, the name and availability zone of an EC2 instance or the name of the manufacturer of a wind turbine are dimensions. Dimension names can only contain alphanumeric characters and underscores. Dimension names cannot end with an underscore.

", + "Dimension$Name": "

Dimension represents the meta data attributes of the time series. For example, the name and availability zone of an EC2 instance or the name of the manufacturer of a wind turbine are dimensions.

For constraints on Dimension names, see Naming Constraints.

", "Record$MeasureName": "

Measure represents the data attribute of the time series. For example, the CPU utilization of an EC2 instance or the RPM of a wind turbine are measures.

", - "Record$Time": "

Contains the time at which the measure value for the data point was collected.

" + "Record$Time": "

Contains the time at which the measure value for the data point was collected. The time value plus the unit provides the time elapsed since the epoch. For example, if the time value is 12345 and the unit is ms, then 12345 ms have elapsed since the epoch.

" } }, "Table": { diff --git a/models/apis/transcribe-streaming/2017-10-26/api-2.json b/models/apis/transcribe-streaming/2017-10-26/api-2.json index 8d95ac941a3..b8df57377ef 100755 --- a/models/apis/transcribe-streaming/2017-10-26/api-2.json +++ b/models/apis/transcribe-streaming/2017-10-26/api-2.json @@ -12,6 +12,22 @@ "uid":"transcribe-streaming-2017-10-26" }, "operations":{ + "StartMedicalStreamTranscription":{ + "name":"StartMedicalStreamTranscription", + "http":{ + "method":"POST", + "requestUri":"/medical-stream-transcription" + }, + "input":{"shape":"StartMedicalStreamTranscriptionRequest"}, + "output":{"shape":"StartMedicalStreamTranscriptionResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceUnavailableException"} + ] + }, "StartStreamTranscription":{ "name":"StartStreamTranscription", "http":{ @@ -68,6 +84,7 @@ "exception":true }, "Boolean":{"type":"boolean"}, + "Confidence":{"type":"double"}, "ConflictException":{ "type":"structure", "members":{ @@ -118,7 +135,10 @@ "fr-FR", "en-AU", "it-IT", - "de-DE" + "de-DE", + "pt-BR", + "ja-JP", + "ko-KR" ] }, "LimitExceededException":{ @@ -131,13 +151,83 @@ }, "MediaEncoding":{ "type":"string", - "enum":["pcm"] + "enum":[ + "pcm", + "ogg-opus", + "flac" + ] }, "MediaSampleRateHertz":{ "type":"integer", "max":48000, "min":8000 }, + "MedicalAlternative":{ + "type":"structure", + "members":{ + "Transcript":{"shape":"String"}, + "Items":{"shape":"MedicalItemList"} + } + }, + "MedicalAlternativeList":{ + "type":"list", + "member":{"shape":"MedicalAlternative"} + }, + "MedicalItem":{ + "type":"structure", + "members":{ + "StartTime":{"shape":"Double"}, + "EndTime":{"shape":"Double"}, + "Type":{"shape":"ItemType"}, + "Content":{"shape":"String"}, + "Confidence":{"shape":"Confidence"}, + "Speaker":{"shape":"String"} + } + }, + "MedicalItemList":{ + "type":"list", + 
"member":{"shape":"MedicalItem"} + }, + "MedicalResult":{ + "type":"structure", + "members":{ + "ResultId":{"shape":"String"}, + "StartTime":{"shape":"Double"}, + "EndTime":{"shape":"Double"}, + "IsPartial":{"shape":"Boolean"}, + "Alternatives":{"shape":"MedicalAlternativeList"}, + "ChannelId":{"shape":"String"} + } + }, + "MedicalResultList":{ + "type":"list", + "member":{"shape":"MedicalResult"} + }, + "MedicalTranscript":{ + "type":"structure", + "members":{ + "Results":{"shape":"MedicalResultList"} + } + }, + "MedicalTranscriptEvent":{ + "type":"structure", + "members":{ + "Transcript":{"shape":"MedicalTranscript"} + }, + "event":true + }, + "MedicalTranscriptResultStream":{ + "type":"structure", + "members":{ + "TranscriptEvent":{"shape":"MedicalTranscriptEvent"}, + "BadRequestException":{"shape":"BadRequestException"}, + "LimitExceededException":{"shape":"LimitExceededException"}, + "InternalFailureException":{"shape":"InternalFailureException"}, + "ConflictException":{"shape":"ConflictException"}, + "ServiceUnavailableException":{"shape":"ServiceUnavailableException"} + }, + "eventstream":true + }, "NumberOfChannels":{ "type":"integer", "min":2 @@ -172,6 +262,144 @@ "min":36, "pattern":"[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}" }, + "Specialty":{ + "type":"string", + "enum":[ + "PRIMARYCARE", + "CARDIOLOGY", + "NEUROLOGY", + "ONCOLOGY", + "RADIOLOGY", + "UROLOGY" + ] + }, + "StartMedicalStreamTranscriptionRequest":{ + "type":"structure", + "required":[ + "LanguageCode", + "MediaSampleRateHertz", + "MediaEncoding", + "Specialty", + "Type", + "AudioStream" + ], + "members":{ + "LanguageCode":{ + "shape":"LanguageCode", + "location":"header", + "locationName":"x-amzn-transcribe-language-code" + }, + "MediaSampleRateHertz":{ + "shape":"MediaSampleRateHertz", + "location":"header", + "locationName":"x-amzn-transcribe-sample-rate" + }, + "MediaEncoding":{ + "shape":"MediaEncoding", + "location":"header", + 
"locationName":"x-amzn-transcribe-media-encoding" + }, + "VocabularyName":{ + "shape":"VocabularyName", + "location":"header", + "locationName":"x-amzn-transcribe-vocabulary-name" + }, + "Specialty":{ + "shape":"Specialty", + "location":"header", + "locationName":"x-amzn-transcribe-specialty" + }, + "Type":{ + "shape":"Type", + "location":"header", + "locationName":"x-amzn-transcribe-type" + }, + "ShowSpeakerLabel":{ + "shape":"Boolean", + "location":"header", + "locationName":"x-amzn-transcribe-show-speaker-label" + }, + "SessionId":{ + "shape":"SessionId", + "location":"header", + "locationName":"x-amzn-transcribe-session-id" + }, + "AudioStream":{"shape":"AudioStream"}, + "EnableChannelIdentification":{ + "shape":"Boolean", + "location":"header", + "locationName":"x-amzn-transcribe-enable-channel-identification" + }, + "NumberOfChannels":{ + "shape":"NumberOfChannels", + "location":"header", + "locationName":"x-amzn-transcribe-number-of-channels" + } + }, + "payload":"AudioStream" + }, + "StartMedicalStreamTranscriptionResponse":{ + "type":"structure", + "members":{ + "RequestId":{ + "shape":"RequestId", + "location":"header", + "locationName":"x-amzn-request-id" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "location":"header", + "locationName":"x-amzn-transcribe-language-code" + }, + "MediaSampleRateHertz":{ + "shape":"MediaSampleRateHertz", + "location":"header", + "locationName":"x-amzn-transcribe-sample-rate" + }, + "MediaEncoding":{ + "shape":"MediaEncoding", + "location":"header", + "locationName":"x-amzn-transcribe-media-encoding" + }, + "VocabularyName":{ + "shape":"VocabularyName", + "location":"header", + "locationName":"x-amzn-transcribe-vocabulary-name" + }, + "Specialty":{ + "shape":"Specialty", + "location":"header", + "locationName":"x-amzn-transcribe-specialty" + }, + "Type":{ + "shape":"Type", + "location":"header", + "locationName":"x-amzn-transcribe-type" + }, + "ShowSpeakerLabel":{ + "shape":"Boolean", + "location":"header", + 
"locationName":"x-amzn-transcribe-show-speaker-label" + }, + "SessionId":{ + "shape":"SessionId", + "location":"header", + "locationName":"x-amzn-transcribe-session-id" + }, + "TranscriptResultStream":{"shape":"MedicalTranscriptResultStream"}, + "EnableChannelIdentification":{ + "shape":"Boolean", + "location":"header", + "locationName":"x-amzn-transcribe-enable-channel-identification" + }, + "NumberOfChannels":{ + "shape":"NumberOfChannels", + "location":"header", + "locationName":"x-amzn-transcribe-number-of-channels" + } + }, + "payload":"TranscriptResultStream" + }, "StartStreamTranscriptionRequest":{ "type":"structure", "required":[ @@ -323,6 +551,13 @@ }, "eventstream":true }, + "Type":{ + "type":"string", + "enum":[ + "CONVERSATION", + "DICTATION" + ] + }, "VocabularyFilterMethod":{ "type":"string", "enum":[ diff --git a/models/apis/transcribe-streaming/2017-10-26/docs-2.json b/models/apis/transcribe-streaming/2017-10-26/docs-2.json index 572ae607d4a..a043ac5fcbf 100755 --- a/models/apis/transcribe-streaming/2017-10-26/docs-2.json +++ b/models/apis/transcribe-streaming/2017-10-26/docs-2.json @@ -2,6 +2,7 @@ "version": "2.0", "service": "

Operations and objects for transcribing streaming speech to text.

", "operations": { + "StartMedicalStreamTranscription": "

Starts a bidirectional HTTP/2 stream where audio is streamed to Amazon Transcribe Medical and the transcription results are streamed to your application.

", "StartStreamTranscription": "

Starts a bidirectional HTTP2 stream where audio is streamed to Amazon Transcribe and the transcription results are streamed to your application.

The following are encoded as HTTP2 headers:

  • x-amzn-transcribe-language-code

  • x-amzn-transcribe-media-encoding

  • x-amzn-transcribe-sample-rate

  • x-amzn-transcribe-session-id

" }, "shapes": { @@ -32,12 +33,14 @@ "AudioStream": { "base": "

Represents the audio stream from your application to Amazon Transcribe.

", "refs": { + "StartMedicalStreamTranscriptionRequest$AudioStream": null, "StartStreamTranscriptionRequest$AudioStream": "

PCM-encoded stream of audio blobs. The audio stream is encoded as an HTTP2 data frame.

" } }, "BadRequestException": { - "base": "

One or more arguments to the StartStreamTranscription operation was invalid. For example, MediaEncoding was not set to pcm or LanguageCode was not set to a valid code. Check the parameters and try your request again.

", + "base": "

One or more arguments to the StartStreamTranscription or StartMedicalStreamTranscription operation was invalid. For example, MediaEncoding was not set to a valid encoding, or LanguageCode was not set to a valid code. Check the parameters and try your request again.

", "refs": { + "MedicalTranscriptResultStream$BadRequestException": null, "TranscriptResultStream$BadRequestException": "

A client error occurred when the stream was created. Check the parameters of the request and try your request again.

" } }, @@ -45,16 +48,28 @@ "base": null, "refs": { "Item$VocabularyFilterMatch": "

Indicates whether a word in the item matches a word in the vocabulary filter you've chosen for your real-time stream. If true then a word in the item matches your vocabulary filter.

", + "MedicalResult$IsPartial": "

Amazon Transcribe Medical divides the incoming audio stream into segments at natural points in the audio. Transcription results are returned based on these segments.

The IsPartial field is true to indicate that Amazon Transcribe Medical has additional transcription data to send. The IsPartial field is false to indicate that this is the last transcription result for the segment.

", "Result$IsPartial": "

Amazon Transcribe divides the incoming audio stream into segments at natural points in the audio. Transcription results are returned based on these segments.

The IsPartial field is true to indicate that Amazon Transcribe has additional transcription data to send, false to indicate that this is the last transcription result for the segment.

", + "StartMedicalStreamTranscriptionRequest$ShowSpeakerLabel": "

When true, enables speaker identification in your real-time stream.

", + "StartMedicalStreamTranscriptionRequest$EnableChannelIdentification": "

When true, instructs Amazon Transcribe Medical to process each audio channel separately and then merge the transcription output of each channel into a single transcription.

Amazon Transcribe Medical also produces a transcription of each item. An item includes the start time, end time, and any alternative transcriptions.

You can't set both ShowSpeakerLabel and EnableChannelIdentification in the same request. If you set both, your request returns a BadRequestException.

", + "StartMedicalStreamTranscriptionResponse$ShowSpeakerLabel": "

Shows whether speaker identification was enabled in the stream.

", + "StartMedicalStreamTranscriptionResponse$EnableChannelIdentification": "

Shows whether channel identification has been enabled in the stream.

", "StartStreamTranscriptionRequest$ShowSpeakerLabel": "

When true, enables speaker identification in your real-time stream.

", "StartStreamTranscriptionRequest$EnableChannelIdentification": "

When true, instructs Amazon Transcribe to process each audio channel separately and then merge the transcription output of each channel into a single transcription.

Amazon Transcribe also produces a transcription of each item. An item includes the start time, end time, and any alternative transcriptions.

You can't set both ShowSpeakerLabel and EnableChannelIdentification in the same request. If you set both, your request returns a BadRequestException.

", "StartStreamTranscriptionResponse$ShowSpeakerLabel": "

Shows whether speaker identification was enabled in the stream.

", "StartStreamTranscriptionResponse$EnableChannelIdentification": "

Shows whether channel identification has been enabled in the stream.

" } }, + "Confidence": { + "base": null, + "refs": { + "MedicalItem$Confidence": "

A value between 0 and 1 for an item that is a confidence score that Amazon Transcribe Medical assigns to each word that it transcribes.

" + } + }, "ConflictException": { "base": "

A new stream started with the same session ID. The current stream has been terminated.

", "refs": { + "MedicalTranscriptResultStream$ConflictException": null, "TranscriptResultStream$ConflictException": "

A new stream started with the same session ID. The current stream has been terminated.

" } }, @@ -63,13 +78,18 @@ "refs": { "Item$StartTime": "

The offset from the beginning of the audio stream to the beginning of the audio that resulted in the item.

", "Item$EndTime": "

The offset from the beginning of the audio stream to the end of the audio that resulted in the item.

", + "MedicalItem$StartTime": "

The number of seconds into an audio stream that indicates the creation time of an item.

", + "MedicalItem$EndTime": "

The number of seconds into an audio stream that indicates the creation time of an item.

", + "MedicalResult$StartTime": "

The time, in seconds, from the beginning of the audio stream to the beginning of the result.

", + "MedicalResult$EndTime": "

The time, in seconds, from the beginning of the audio stream to the end of the result.

", "Result$StartTime": "

The offset in seconds from the beginning of the audio stream to the beginning of the result.

", "Result$EndTime": "

The offset in seconds from the beginning of the audio stream to the end of the result.

" } }, "InternalFailureException": { - "base": "

A problem occurred while processing the audio. Amazon Transcribe terminated processing. Try your request again.

", + "base": "

A problem occurred while processing the audio. Amazon Transcribe or Amazon Transcribe Medical terminated processing. Try your request again.

", "refs": { + "MedicalTranscriptResultStream$InternalFailureException": null, "TranscriptResultStream$InternalFailureException": "

A problem occurred while processing the audio. Amazon Transcribe terminated processing.

" } }, @@ -88,12 +108,15 @@ "ItemType": { "base": null, "refs": { - "Item$Type": "

The type of the item. PRONUNCIATION indicates that the item is a word that was recognized in the input audio. PUNCTUATION indicates that the item was interpreted as a pause in the input audio.

" + "Item$Type": "

The type of the item. PRONUNCIATION indicates that the item is a word that was recognized in the input audio. PUNCTUATION indicates that the item was interpreted as a pause in the input audio.

", + "MedicalItem$Type": "

The type of the item. PRONUNCIATION indicates that the item is a word that was recognized in the input audio. PUNCTUATION indicates that the item was interpreted as a pause in the input audio, such as a period to indicate the end of a sentence.

" } }, "LanguageCode": { "base": null, "refs": { + "StartMedicalStreamTranscriptionRequest$LanguageCode": "

Indicates the source language used in the input audio stream. For Amazon Transcribe Medical, this is US English (en-US).

", + "StartMedicalStreamTranscriptionResponse$LanguageCode": "

The language code for the response transcript. For Amazon Transcribe Medical, this is US English (en-US).

", "StartStreamTranscriptionRequest$LanguageCode": "

Indicates the source language used in the input audio stream.

", "StartStreamTranscriptionResponse$LanguageCode": "

The language code for the input audio stream.

" } @@ -101,26 +124,87 @@ "LimitExceededException": { "base": "

You have exceeded the maximum number of concurrent transcription streams, are starting transcription streams too quickly, or the maximum audio length of 4 hours. Wait until a stream has finished processing, or break your audio stream into smaller chunks and try your request again.

", "refs": { + "MedicalTranscriptResultStream$LimitExceededException": null, "TranscriptResultStream$LimitExceededException": "

Your client has exceeded one of the Amazon Transcribe limits, typically the limit on audio length. Break your audio stream into smaller chunks and try your request again.

" } }, "MediaEncoding": { "base": null, "refs": { - "StartStreamTranscriptionRequest$MediaEncoding": "

The encoding used for the input audio. pcm is the only valid value.

", + "StartMedicalStreamTranscriptionRequest$MediaEncoding": "

The encoding used for the input audio.

", + "StartMedicalStreamTranscriptionResponse$MediaEncoding": "

The encoding used for the input audio stream.

", + "StartStreamTranscriptionRequest$MediaEncoding": "

The encoding used for the input audio.

", "StartStreamTranscriptionResponse$MediaEncoding": "

The encoding used for the input audio stream.

" } }, "MediaSampleRateHertz": { "base": null, "refs": { + "StartMedicalStreamTranscriptionRequest$MediaSampleRateHertz": "

The sample rate of the input audio in Hertz. Sample rates of 16000 Hz or higher are accepted.

", + "StartMedicalStreamTranscriptionResponse$MediaSampleRateHertz": "

The sample rate of the input audio in Hertz. Valid value: 16000 Hz.

", "StartStreamTranscriptionRequest$MediaSampleRateHertz": "

The sample rate, in Hertz, of the input audio. We suggest that you use 8000 Hz for low quality audio and 16000 Hz for high quality audio.

", "StartStreamTranscriptionResponse$MediaSampleRateHertz": "

The sample rate for the input audio stream. Use 8000 Hz for low quality audio and 16000 Hz for high quality audio.

" } }, + "MedicalAlternative": { + "base": "

A list of possible transcriptions for the audio.

", + "refs": { + "MedicalAlternativeList$member": null + } + }, + "MedicalAlternativeList": { + "base": null, + "refs": { + "MedicalResult$Alternatives": "

A list of possible transcriptions of the audio. Each alternative typically contains one Item that contains the result of the transcription.

" + } + }, + "MedicalItem": { + "base": "

A word or punctuation that is transcribed from the input audio.

", + "refs": { + "MedicalItemList$member": null + } + }, + "MedicalItemList": { + "base": null, + "refs": { + "MedicalAlternative$Items": "

A list of objects that contains words and punctuation marks that represents one or more interpretations of the input audio.

" + } + }, + "MedicalResult": { + "base": "

The results of transcribing a portion of the input audio stream.

", + "refs": { + "MedicalResultList$member": null + } + }, + "MedicalResultList": { + "base": null, + "refs": { + "MedicalTranscript$Results": "

MedicalResult objects that contain the results of transcribing a portion of the input audio stream. The array can be empty.

" + } + }, + "MedicalTranscript": { + "base": "

The medical transcript in a MedicalTranscriptEvent.

", + "refs": { + "MedicalTranscriptEvent$Transcript": "

The transcription of the audio stream. The transcription is composed of all of the items in the results list.

" + } + }, + "MedicalTranscriptEvent": { + "base": "

Represents a set of transcription results from the server to the client. It contains one or more segments of the transcription.

", + "refs": { + "MedicalTranscriptResultStream$TranscriptEvent": "

A portion of the transcription of the audio stream. Events are sent periodically from Amazon Transcribe Medical to your application. The event can be a partial transcription of a section of the audio stream, or it can be the entire transcription of that portion of the audio stream.

" + } + }, + "MedicalTranscriptResultStream": { + "base": "

Represents the transcription result stream from Amazon Transcribe Medical to your application.

", + "refs": { + "StartMedicalStreamTranscriptionResponse$TranscriptResultStream": "

Represents the stream of transcription events from Amazon Transcribe Medical to your application.

" + } + }, "NumberOfChannels": { "base": null, "refs": { + "StartMedicalStreamTranscriptionRequest$NumberOfChannels": "

The number of channels that are in your audio stream.

", + "StartMedicalStreamTranscriptionResponse$NumberOfChannels": "

The number of channels identified in the stream.

", "StartStreamTranscriptionRequest$NumberOfChannels": "

The number of channels that are in your audio stream.

", "StartStreamTranscriptionResponse$NumberOfChannels": "

The number of channels identified in the stream.

" } @@ -128,6 +212,7 @@ "RequestId": { "base": null, "refs": { + "StartMedicalStreamTranscriptionResponse$RequestId": "

An identifier for the streaming transcription.

", "StartStreamTranscriptionResponse$RequestId": "

An identifier for the streaming transcription.

" } }, @@ -146,16 +231,36 @@ "ServiceUnavailableException": { "base": "

Service is currently unavailable. Try your request later.

", "refs": { + "MedicalTranscriptResultStream$ServiceUnavailableException": null, "TranscriptResultStream$ServiceUnavailableException": "

Service is currently unavailable. Try your request later.

" } }, "SessionId": { "base": null, "refs": { + "StartMedicalStreamTranscriptionRequest$SessionId": "

Optional. An identifier for the transcription session. If you don't provide a session ID, Amazon Transcribe generates one for you and returns it in the response.

", + "StartMedicalStreamTranscriptionResponse$SessionId": "

Optional. An identifier for the transcription session. If you don't provide a session ID, Amazon Transcribe generates one for you and returns it in the response.

", "StartStreamTranscriptionRequest$SessionId": "

An identifier for the transcription session. Use this parameter when you want to retry a session. If you don't provide a session ID, Amazon Transcribe will generate one for you and return it in the response.

", "StartStreamTranscriptionResponse$SessionId": "

An identifier for a specific transcription session.

" } }, + "Specialty": { + "base": null, + "refs": { + "StartMedicalStreamTranscriptionRequest$Specialty": "

The medical specialty of the clinician or provider.

", + "StartMedicalStreamTranscriptionResponse$Specialty": "

The specialty in the medical domain.

" + } + }, + "StartMedicalStreamTranscriptionRequest": { + "base": null, + "refs": { + } + }, + "StartMedicalStreamTranscriptionResponse": { + "base": null, + "refs": { + } + }, "StartStreamTranscriptionRequest": { "base": null, "refs": { @@ -176,6 +281,11 @@ "Item$Content": "

The word or punctuation that was recognized in the input audio.

", "Item$Speaker": "

If speaker identification is enabled, shows the speakers identified in the real-time stream.

", "LimitExceededException$Message": null, + "MedicalAlternative$Transcript": "

The text that was transcribed from the audio.

", + "MedicalItem$Content": "

The word or punctuation mark that was recognized in the input audio.

", + "MedicalItem$Speaker": "

If speaker identification is enabled, shows the integer values that correspond to the different speakers identified in the stream. For example, if the value of Speaker in the stream is either a 0 or a 1, that indicates that Amazon Transcribe Medical has identified two speakers in the stream. The value of 0 corresponds to one speaker and the value of 1 corresponds to the other speaker.

", + "MedicalResult$ResultId": "

A unique identifier for the result.

", + "MedicalResult$ChannelId": "

When channel identification is enabled, Amazon Transcribe Medical transcribes the speech from each audio channel separately.

You can use ChannelId to retrieve the transcription results for a single channel in your audio stream.

", "Result$ResultId": "

A unique identifier for the result.

", "Result$ChannelId": "

When channel identification is enabled, Amazon Transcribe transcribes the speech from each audio channel separately.

You can use ChannelId to retrieve the transcription results for a single channel in your audio stream.

", "ServiceUnavailableException$Message": null @@ -199,6 +309,13 @@ "StartStreamTranscriptionResponse$TranscriptResultStream": "

Represents the stream of transcription events from Amazon Transcribe to your application.

" } }, + "Type": { + "base": null, + "refs": { + "StartMedicalStreamTranscriptionRequest$Type": "

The type of input audio. Choose DICTATION for a provider dictating patient notes. Choose CONVERSATION for a dialogue between a patient and one or more medical professionals.

", + "StartMedicalStreamTranscriptionResponse$Type": "

The type of audio that was transcribed.

" + } + }, "VocabularyFilterMethod": { "base": null, "refs": { @@ -216,6 +333,8 @@ "VocabularyName": { "base": null, "refs": { + "StartMedicalStreamTranscriptionRequest$VocabularyName": "

The name of the medical custom vocabulary to use when processing the real-time stream.

", + "StartMedicalStreamTranscriptionResponse$VocabularyName": "

The name of the vocabulary used when processing the stream.

", "StartStreamTranscriptionRequest$VocabularyName": "

The name of the vocabulary to use when processing the transcription job.

", "StartStreamTranscriptionResponse$VocabularyName": "

The name of the vocabulary used when processing the stream.

" } diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index 92cb2ded7bb..973270b68c5 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -5462,6 +5462,7 @@ }, "snowball" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -7046,6 +7047,12 @@ "cn-northwest-1" : { } } }, + "ram" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "rds" : { "endpoints" : { "cn-north-1" : { }, diff --git a/service/appflow/api.go b/service/appflow/api.go index 6d5bf186bc1..502925eebb0 100644 --- a/service/appflow/api.go +++ b/service/appflow/api.go @@ -2425,6 +2425,9 @@ type ConnectorMetadata struct { // The connector metadata specific to Trend Micro. Trendmicro *TrendmicroMetadata `type:"structure"` + // The connector metadata specific to Upsolver. + Upsolver *UpsolverMetadata `type:"structure"` + // The connector metadata specific to Veeva. Veeva *VeevaMetadata `type:"structure"` @@ -2532,6 +2535,12 @@ func (s *ConnectorMetadata) SetTrendmicro(v *TrendmicroMetadata) *ConnectorMetad return s } +// SetUpsolver sets the Upsolver field's value. +func (s *ConnectorMetadata) SetUpsolver(v *UpsolverMetadata) *ConnectorMetadata { + s.Upsolver = v + return s +} + // SetVeeva sets the Veeva field's value. func (s *ConnectorMetadata) SetVeeva(v *VeevaMetadata) *ConnectorMetadata { s.Veeva = v @@ -4535,6 +4544,9 @@ type DestinationConnectorProperties struct { // The properties required to query Snowflake. Snowflake *SnowflakeDestinationProperties `type:"structure"` + + // The properties required to query Upsolver. 
+ Upsolver *UpsolverDestinationProperties `type:"structure"` } // String returns the string representation @@ -4575,6 +4587,11 @@ func (s *DestinationConnectorProperties) Validate() error { invalidParams.AddNested("Snowflake", err.(request.ErrInvalidParams)) } } + if s.Upsolver != nil { + if err := s.Upsolver.Validate(); err != nil { + invalidParams.AddNested("Upsolver", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -4612,6 +4629,12 @@ func (s *DestinationConnectorProperties) SetSnowflake(v *SnowflakeDestinationPro return s } +// SetUpsolver sets the Upsolver field's value. +func (s *DestinationConnectorProperties) SetUpsolver(v *UpsolverDestinationProperties) *DestinationConnectorProperties { + s.Upsolver = v + return s +} + // The properties that can be applied to a field when connector is being used // as a destination. type DestinationFieldProperties struct { @@ -6959,7 +6982,8 @@ type ScheduledTriggerProperties struct { // Specifies the scheduled end time for a schedule-triggered flow. ScheduleEndTime *time.Time `locationName:"scheduleEndTime" type:"timestamp"` - // The scheduling expression that determines when and how often the rule runs. + // The scheduling expression that determines the rate at which the schedule + // will run, for example rate(5minutes). // // ScheduleExpression is a required field ScheduleExpression *string `locationName:"scheduleExpression" type:"string" required:"true"` @@ -9028,6 +9052,156 @@ func (s *UpdateFlowOutput) SetFlowStatus(v string) *UpdateFlowOutput { return s } +// The properties that are applied when Upsolver is used as a destination. +type UpsolverDestinationProperties struct { + _ struct{} `type:"structure"` + + // The Upsolver Amazon S3 bucket name in which Amazon AppFlow places the transferred + // data. 
+ // + // BucketName is a required field + BucketName *string `locationName:"bucketName" min:"16" type:"string" required:"true"` + + // The object key for the destination Upsolver Amazon S3 bucket in which Amazon + // AppFlow places the files. + BucketPrefix *string `locationName:"bucketPrefix" type:"string"` + + // The configuration that determines how data is formatted when Upsolver is + // used as the flow destination. + // + // S3OutputFormatConfig is a required field + S3OutputFormatConfig *UpsolverS3OutputFormatConfig `locationName:"s3OutputFormatConfig" type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpsolverDestinationProperties) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpsolverDestinationProperties) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpsolverDestinationProperties) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpsolverDestinationProperties"} + if s.BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("BucketName")) + } + if s.BucketName != nil && len(*s.BucketName) < 16 { + invalidParams.Add(request.NewErrParamMinLen("BucketName", 16)) + } + if s.S3OutputFormatConfig == nil { + invalidParams.Add(request.NewErrParamRequired("S3OutputFormatConfig")) + } + if s.S3OutputFormatConfig != nil { + if err := s.S3OutputFormatConfig.Validate(); err != nil { + invalidParams.AddNested("S3OutputFormatConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucketName sets the BucketName field's value. +func (s *UpsolverDestinationProperties) SetBucketName(v string) *UpsolverDestinationProperties { + s.BucketName = &v + return s +} + +// SetBucketPrefix sets the BucketPrefix field's value. 
+func (s *UpsolverDestinationProperties) SetBucketPrefix(v string) *UpsolverDestinationProperties { + s.BucketPrefix = &v + return s +} + +// SetS3OutputFormatConfig sets the S3OutputFormatConfig field's value. +func (s *UpsolverDestinationProperties) SetS3OutputFormatConfig(v *UpsolverS3OutputFormatConfig) *UpsolverDestinationProperties { + s.S3OutputFormatConfig = v + return s +} + +// The connector metadata specific to Upsolver. +type UpsolverMetadata struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpsolverMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpsolverMetadata) GoString() string { + return s.String() +} + +// The configuration that determines how Amazon AppFlow formats the flow output +// data when Upsolver is used as the destination. +type UpsolverS3OutputFormatConfig struct { + _ struct{} `type:"structure"` + + // The aggregation settings that you can use to customize the output format + // of your flow data. + AggregationConfig *AggregationConfig `locationName:"aggregationConfig" type:"structure"` + + // Indicates the file type that Amazon AppFlow places in the Upsolver Amazon + // S3 bucket. + FileType *string `locationName:"fileType" type:"string" enum:"FileType"` + + // Determines the prefix that Amazon AppFlow applies to the destination folder + // name. You can name your destination folders according to the flow frequency + // and date. + // + // PrefixConfig is a required field + PrefixConfig *PrefixConfig `locationName:"prefixConfig" type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpsolverS3OutputFormatConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpsolverS3OutputFormatConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpsolverS3OutputFormatConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpsolverS3OutputFormatConfig"} + if s.PrefixConfig == nil { + invalidParams.Add(request.NewErrParamRequired("PrefixConfig")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAggregationConfig sets the AggregationConfig field's value. +func (s *UpsolverS3OutputFormatConfig) SetAggregationConfig(v *AggregationConfig) *UpsolverS3OutputFormatConfig { + s.AggregationConfig = v + return s +} + +// SetFileType sets the FileType field's value. +func (s *UpsolverS3OutputFormatConfig) SetFileType(v string) *UpsolverS3OutputFormatConfig { + s.FileType = &v + return s +} + +// SetPrefixConfig sets the PrefixConfig field's value. +func (s *UpsolverS3OutputFormatConfig) SetPrefixConfig(v *PrefixConfig) *UpsolverS3OutputFormatConfig { + s.PrefixConfig = v + return s +} + // The request has invalid or missing parameters. type ValidationException struct { _ struct{} `type:"structure"` @@ -9500,6 +9674,9 @@ const ( // ConnectorTypeEventBridge is a ConnectorType enum value ConnectorTypeEventBridge = "EventBridge" + + // ConnectorTypeUpsolver is a ConnectorType enum value + ConnectorTypeUpsolver = "Upsolver" ) // ConnectorType_Values returns all elements of the ConnectorType enum @@ -9522,6 +9699,7 @@ func ConnectorType_Values() []string { ConnectorTypeAmplitude, ConnectorTypeVeeva, ConnectorTypeEventBridge, + ConnectorTypeUpsolver, } } diff --git a/service/batch/api.go b/service/batch/api.go index 9de96e5dbfe..3950091b1b7 100644 --- a/service/batch/api.go +++ b/service/batch/api.go @@ -2507,13 +2507,20 @@ type ComputeResource struct { // The desired number of Amazon EC2 vCPUS in the compute environment. DesiredvCpus *int64 `locationName:"desiredvCpus" type:"integer"` + // Provides additional details used to selecting the AMI to use for instances + // in a compute environment. 
+ Ec2Configuration []*Ec2Configuration `locationName:"ec2Configuration" type:"list"` + // The Amazon EC2 key pair that is used for instances launched in the compute // environment. Ec2KeyPair *string `locationName:"ec2KeyPair" type:"string"` // The Amazon Machine Image (AMI) ID used for instances launched in the compute - // environment. - ImageId *string `locationName:"imageId" type:"string"` + // environment. This parameter is overridden by the imageIdOverride member of + // the Ec2Configuration structure. + // + // Deprecated: This field is deprecated, use ec2Configuration[].imageIdOverride instead. + ImageId *string `locationName:"imageId" deprecated:"true" type:"string"` // The Amazon ECS instance profile applied to Amazon EC2 instances in a compute // environment. You can specify the short name or full Amazon Resource Name @@ -2629,6 +2636,16 @@ func (s *ComputeResource) Validate() error { if s.Type == nil { invalidParams.Add(request.NewErrParamRequired("Type")) } + if s.Ec2Configuration != nil { + for i, v := range s.Ec2Configuration { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Ec2Configuration", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -2654,6 +2671,12 @@ func (s *ComputeResource) SetDesiredvCpus(v int64) *ComputeResource { return s } +// SetEc2Configuration sets the Ec2Configuration field's value. +func (s *ComputeResource) SetEc2Configuration(v []*Ec2Configuration) *ComputeResource { + s.Ec2Configuration = v + return s +} + // SetEc2KeyPair sets the Ec2KeyPair field's value. func (s *ComputeResource) SetEc2KeyPair(v string) *ComputeResource { s.Ec2KeyPair = &v @@ -4387,6 +4410,85 @@ func (s *Device) SetPermissions(v []*string) *Device { return s } +// Provides information used to select Amazon Machine Images (AMIs) for instances +// in the compute environment. 
If the Ec2Configuration is not specified, the +// default is ECS_AL1. +type Ec2Configuration struct { + _ struct{} `type:"structure"` + + // The AMI ID used for instances launched in the compute environment that match + // the image type. This setting overrides the imageId set in the computeResource + // object. + ImageIdOverride *string `locationName:"imageIdOverride" min:"1" type:"string"` + + // The image type to match with the instance type to pick an AMI. If the imageIdOverride + // parameter is not specified, then a recent Amazon ECS-optimized AMI (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) + // will be used. + // + // ECS_AL2 + // + // Amazon Linux 2 (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#al2ami)− + // Default for all AWS Graviton-based instance families (for example, C6g, M6g, + // R6g, and T4g) and can be used for all non-GPU instance types. + // + // ECS_AL2_NVIDIA + // + // Amazon Linux 2 (GPU) (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#gpuami)−Default + // for all GPU instance families (for example P4 and G4) and can be used for + // all non-AWS Graviton-based instance types. + // + // ECS_AL1 + // + // Amazon Linux (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#alami)−Default + // for all non-GPU, non-AWS-Graviton instance families. Amazon Linux is reaching + // the end-of-life of standard support. For more information, see Amazon Linux + // AMI (https://aws.amazon.com/amazon-linux-ami/). 
+ // + // ImageType is a required field + ImageType *string `locationName:"imageType" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s Ec2Configuration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Ec2Configuration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Ec2Configuration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Ec2Configuration"} + if s.ImageIdOverride != nil && len(*s.ImageIdOverride) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ImageIdOverride", 1)) + } + if s.ImageType == nil { + invalidParams.Add(request.NewErrParamRequired("ImageType")) + } + if s.ImageType != nil && len(*s.ImageType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ImageType", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetImageIdOverride sets the ImageIdOverride field's value. +func (s *Ec2Configuration) SetImageIdOverride(v string) *Ec2Configuration { + s.ImageIdOverride = &v + return s +} + +// SetImageType sets the ImageType field's value. +func (s *Ec2Configuration) SetImageType(v string) *Ec2Configuration { + s.ImageType = &v + return s +} + // Specifies a set of conditions to be met, and an action to take (RETRY or // EXIT) if all conditions are met. type EvaluateOnExit struct { diff --git a/service/cloudformation/api.go b/service/cloudformation/api.go index 34a976c8830..498b77207fb 100644 --- a/service/cloudformation/api.go +++ b/service/cloudformation/api.go @@ -7805,9 +7805,9 @@ func (s DeleteStackSetOutput) GoString() string { } // [Service-managed permissions] The AWS Organizations accounts to which StackSets -// deploys. StackSets does not deploy stack instances to the organization master -// account, even if the master account is in your organization or in an OU in -// your organization. 
+// deploys. StackSets does not deploy stack instances to the organization management +// account, even if the organization management account is in your organization +// or in an OU in your organization. // // For update operations, you can specify either Accounts or OrganizationalUnitIds. // For create and delete operations, specify OrganizationalUnitIds. @@ -11637,6 +11637,9 @@ type ListTypesInput struct { // handlers, and therefore cannot actually be provisioned. ProvisioningType *string `type:"string" enum:"ProvisioningType"` + // The type of extension. + Type *string `type:"string" enum:"RegistryType"` + // The scope at which the type is visible and usable in CloudFormation operations. // // Valid values include: @@ -11702,6 +11705,12 @@ func (s *ListTypesInput) SetProvisioningType(v string) *ListTypesInput { return s } +// SetType sets the Type field's value. +func (s *ListTypesInput) SetType(v string) *ListTypesInput { + s.Type = &v + return s +} + // SetVisibility sets the Visibility field's value. func (s *ListTypesInput) SetVisibility(v string) *ListTypesInput { s.Visibility = &v @@ -11805,6 +11814,62 @@ func (s *LoggingConfig) SetLogRoleArn(v string) *LoggingConfig { return s } +// Contains information about the module from which the resource was created, +// if the resource was created from a module included in the stack template. +// +// For more information on modules, see Using modules to encapsulate and reuse +// resource configurations (AWSCloudFormation/latest/UserGuide/modules.html) +// in the CloudFormation User Guide. +type ModuleInfo struct { + _ struct{} `type:"structure"` + + // A concantenated list of the logical IDs of the module or modules containing + // the resource. Modules are listed starting with the inner-most nested module, + // and separated by /. + // + // In the following example, the resource was created from a module, moduleA, + // that is nested inside a parent module, moduleB. 
+ // + // moduleA/moduleB + // + // For more information, see Referencing resources in a module (AWSCloudFormation/latest/UserGuide/modules.html#module-ref-resources) + // in the CloudFormation User Guide. + LogicalIdHierarchy *string `type:"string"` + + // A concantenated list of the the module type or types containing the resource. + // Module types are listed starting with the inner-most nested module, and separated + // by /. + // + // In the following example, the resource was created from a module of type + // AWS::First::Example::MODULE, that is nested inside a parent module of type + // AWS::Second::Example::MODULE. + // + // AWS::First::Example::MODULE/AWS::Second::Example::MODULE + TypeHierarchy *string `type:"string"` +} + +// String returns the string representation +func (s ModuleInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModuleInfo) GoString() string { + return s.String() +} + +// SetLogicalIdHierarchy sets the LogicalIdHierarchy field's value. +func (s *ModuleInfo) SetLogicalIdHierarchy(v string) *ModuleInfo { + s.LogicalIdHierarchy = &v + return s +} + +// SetTypeHierarchy sets the TypeHierarchy field's value. +func (s *ModuleInfo) SetTypeHierarchy(v string) *ModuleInfo { + s.TypeHierarchy = &v + return s +} + // The Output data type. type Output struct { _ struct{} `type:"structure"` @@ -12438,6 +12503,10 @@ type ResourceChange struct { // The resource's logical ID, which is defined in the stack's template. LogicalResourceId *string `type:"string"` + // Contains information about the module from which the resource was created, + // if the resource was created from a module included in the stack template. + ModuleInfo *ModuleInfo `type:"structure"` + // The resource's physical ID (resource name). Resources that you are adding // don't have physical IDs because they haven't been created. 
PhysicalResourceId *string `type:"string"` @@ -12498,6 +12567,12 @@ func (s *ResourceChange) SetLogicalResourceId(v string) *ResourceChange { return s } +// SetModuleInfo sets the ModuleInfo field's value. +func (s *ResourceChange) SetModuleInfo(v *ModuleInfo) *ResourceChange { + s.ModuleInfo = v + return s +} + // SetPhysicalResourceId sets the PhysicalResourceId field's value. func (s *ResourceChange) SetPhysicalResourceId(v string) *ResourceChange { s.PhysicalResourceId = &v @@ -14114,6 +14189,10 @@ type StackResource struct { // LogicalResourceId is a required field LogicalResourceId *string `type:"string" required:"true"` + // Contains information about the module from which the resource was created, + // if the resource was created from a module included in the stack template. + ModuleInfo *ModuleInfo `type:"structure"` + // The name or unique identifier that corresponds to a physical instance ID // of a resource supported by AWS CloudFormation. PhysicalResourceId *string `type:"string"` @@ -14173,6 +14252,12 @@ func (s *StackResource) SetLogicalResourceId(v string) *StackResource { return s } +// SetModuleInfo sets the ModuleInfo field's value. +func (s *StackResource) SetModuleInfo(v *ModuleInfo) *StackResource { + s.ModuleInfo = v + return s +} + // SetPhysicalResourceId sets the PhysicalResourceId field's value. func (s *StackResource) SetPhysicalResourceId(v string) *StackResource { s.PhysicalResourceId = &v @@ -14243,6 +14328,10 @@ type StackResourceDetail struct { // in the AWS CloudFormation User Guide. Metadata *string `type:"string"` + // Contains information about the module from which the resource was created, + // if the resource was created from a module included in the stack template. + ModuleInfo *ModuleInfo `type:"structure"` + // The name or unique identifier that corresponds to a physical instance ID // of a resource supported by AWS CloudFormation. 
PhysicalResourceId *string `type:"string"` @@ -14309,6 +14398,12 @@ func (s *StackResourceDetail) SetMetadata(v string) *StackResourceDetail { return s } +// SetModuleInfo sets the ModuleInfo field's value. +func (s *StackResourceDetail) SetModuleInfo(v *ModuleInfo) *StackResourceDetail { + s.ModuleInfo = v + return s +} + // SetPhysicalResourceId sets the PhysicalResourceId field's value. func (s *StackResourceDetail) SetPhysicalResourceId(v string) *StackResourceDetail { s.PhysicalResourceId = &v @@ -14379,6 +14474,10 @@ type StackResourceDrift struct { // LogicalResourceId is a required field LogicalResourceId *string `type:"string" required:"true"` + // Contains information about the module from which the resource was created, + // if the resource was created from a module included in the stack template. + ModuleInfo *ModuleInfo `type:"structure"` + // The name or unique identifier that corresponds to a physical instance ID // of a resource supported by AWS CloudFormation. PhysicalResourceId *string `type:"string"` @@ -14456,6 +14555,12 @@ func (s *StackResourceDrift) SetLogicalResourceId(v string) *StackResourceDrift return s } +// SetModuleInfo sets the ModuleInfo field's value. +func (s *StackResourceDrift) SetModuleInfo(v *ModuleInfo) *StackResourceDrift { + s.ModuleInfo = v + return s +} + // SetPhysicalResourceId sets the PhysicalResourceId field's value. func (s *StackResourceDrift) SetPhysicalResourceId(v string) *StackResourceDrift { s.PhysicalResourceId = &v @@ -14621,6 +14726,10 @@ type StackResourceSummary struct { // LogicalResourceId is a required field LogicalResourceId *string `type:"string" required:"true"` + // Contains information about the module from which the resource was created, + // if the resource was created from a module included in the stack template. + ModuleInfo *ModuleInfo `type:"structure"` + // The name or unique identifier that corresponds to a physical instance ID // of the resource. 
PhysicalResourceId *string `type:"string"` @@ -14669,6 +14778,12 @@ func (s *StackResourceSummary) SetLogicalResourceId(v string) *StackResourceSumm return s } +// SetModuleInfo sets the ModuleInfo field's value. +func (s *StackResourceSummary) SetModuleInfo(v *ModuleInfo) *StackResourceSummary { + s.ModuleInfo = v + return s +} + // SetPhysicalResourceId sets the PhysicalResourceId field's value. func (s *StackResourceSummary) SetPhysicalResourceId(v string) *StackResourceSummary { s.PhysicalResourceId = &v @@ -17652,12 +17767,16 @@ func RegistrationStatus_Values() []string { const ( // RegistryTypeResource is a RegistryType enum value RegistryTypeResource = "RESOURCE" + + // RegistryTypeModule is a RegistryType enum value + RegistryTypeModule = "MODULE" ) // RegistryType_Values returns all elements of the RegistryType enum func RegistryType_Values() []string { return []string{ RegistryTypeResource, + RegistryTypeModule, } } diff --git a/service/cloudtrail/api.go b/service/cloudtrail/api.go index 5c1a5889e42..530b9ee5aaf 100644 --- a/service/cloudtrail/api.go +++ b/service/cloudtrail/api.go @@ -255,8 +255,9 @@ func (c *CloudTrail) CreateTrailRequest(input *CreateTrailInput) (req *request.R // valid. // // * KmsKeyNotFoundException -// This exception is thrown when the KMS key does not exist, or when the S3 -// bucket and the KMS key are not in the same region. +// This exception is thrown when the KMS key does not exist, when the S3 bucket +// and the KMS key are not in the same region, or when the KMS key associated +// with the SNS topic either does not exist or is not in the same region. // // * KmsKeyDisabledException // This exception is no longer in use. @@ -1535,8 +1536,8 @@ func (c *CloudTrail) LookupEventsRequest(input *LookupEventsInput) (req *request // with a maximum of 50 possible. The response includes a token that you can // use to get the next page of results. // -// The rate of lookup requests is limited to two per second per account. 
If -// this limit is exceeded, a throttling error occurs. +// The rate of lookup requests is limited to two per second, per account, per +// region. If this limit is exceeded, a throttling error occurs. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2443,8 +2444,9 @@ func (c *CloudTrail) UpdateTrailRequest(input *UpdateTrailInput) (req *request.R // other than the region in which the trail was created. // // * KmsKeyNotFoundException -// This exception is thrown when the KMS key does not exist, or when the S3 -// bucket and the KMS key are not in the same region. +// This exception is thrown when the KMS key does not exist, when the S3 bucket +// and the KMS key are not in the same region, or when the KMS key associated +// with the SNS topic either does not exist or is not in the same region. // // * KmsKeyDisabledException // This exception is no longer in use. @@ -2714,6 +2716,175 @@ func (s AddTagsOutput) GoString() string { return s.String() } +type AdvancedEventSelector struct { + _ struct{} `type:"structure"` + + // FieldSelectors is a required field + FieldSelectors []*AdvancedFieldSelector `min:"1" type:"list" required:"true"` + + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AdvancedEventSelector) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdvancedEventSelector) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AdvancedEventSelector) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AdvancedEventSelector"} + if s.FieldSelectors == nil { + invalidParams.Add(request.NewErrParamRequired("FieldSelectors")) + } + if s.FieldSelectors != nil && len(s.FieldSelectors) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FieldSelectors", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.FieldSelectors != nil { + for i, v := range s.FieldSelectors { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "FieldSelectors", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFieldSelectors sets the FieldSelectors field's value. +func (s *AdvancedEventSelector) SetFieldSelectors(v []*AdvancedFieldSelector) *AdvancedEventSelector { + s.FieldSelectors = v + return s +} + +// SetName sets the Name field's value. 
+func (s *AdvancedEventSelector) SetName(v string) *AdvancedEventSelector { + s.Name = &v + return s +} + +type AdvancedFieldSelector struct { + _ struct{} `type:"structure"` + + EndsWith []*string `min:"1" type:"list"` + + Equals []*string `min:"1" type:"list"` + + // Field is a required field + Field *string `min:"1" type:"string" required:"true"` + + NotEndsWith []*string `min:"1" type:"list"` + + NotEquals []*string `min:"1" type:"list"` + + NotStartsWith []*string `min:"1" type:"list"` + + StartsWith []*string `min:"1" type:"list"` +} + +// String returns the string representation +func (s AdvancedFieldSelector) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdvancedFieldSelector) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AdvancedFieldSelector) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AdvancedFieldSelector"} + if s.EndsWith != nil && len(s.EndsWith) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EndsWith", 1)) + } + if s.Equals != nil && len(s.Equals) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Equals", 1)) + } + if s.Field == nil { + invalidParams.Add(request.NewErrParamRequired("Field")) + } + if s.Field != nil && len(*s.Field) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Field", 1)) + } + if s.NotEndsWith != nil && len(s.NotEndsWith) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NotEndsWith", 1)) + } + if s.NotEquals != nil && len(s.NotEquals) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NotEquals", 1)) + } + if s.NotStartsWith != nil && len(s.NotStartsWith) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NotStartsWith", 1)) + } + if s.StartsWith != nil && len(s.StartsWith) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StartsWith", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// 
SetEndsWith sets the EndsWith field's value. +func (s *AdvancedFieldSelector) SetEndsWith(v []*string) *AdvancedFieldSelector { + s.EndsWith = v + return s +} + +// SetEquals sets the Equals field's value. +func (s *AdvancedFieldSelector) SetEquals(v []*string) *AdvancedFieldSelector { + s.Equals = v + return s +} + +// SetField sets the Field field's value. +func (s *AdvancedFieldSelector) SetField(v string) *AdvancedFieldSelector { + s.Field = &v + return s +} + +// SetNotEndsWith sets the NotEndsWith field's value. +func (s *AdvancedFieldSelector) SetNotEndsWith(v []*string) *AdvancedFieldSelector { + s.NotEndsWith = v + return s +} + +// SetNotEquals sets the NotEquals field's value. +func (s *AdvancedFieldSelector) SetNotEquals(v []*string) *AdvancedFieldSelector { + s.NotEquals = v + return s +} + +// SetNotStartsWith sets the NotStartsWith field's value. +func (s *AdvancedFieldSelector) SetNotStartsWith(v []*string) *AdvancedFieldSelector { + s.NotStartsWith = v + return s +} + +// SetStartsWith sets the StartsWith field's value. +func (s *AdvancedFieldSelector) SetStartsWith(v []*string) *AdvancedFieldSelector { + s.StartsWith = v + return s +} + // Cannot set a CloudWatch Logs delivery for this region. type CloudWatchLogsDeliveryUnavailableException struct { _ struct{} `type:"structure"` @@ -3504,6 +3675,11 @@ type EventSelector struct { // in the AWS CloudTrail User Guide. // // By default, the value is true. + // + // The first copy of management events is free. You are charged for additional + // copies of management events that you are logging on any subsequent trail + // in the same region. For more information about CloudTrail pricing, see AWS + // CloudTrail Pricing (http://aws.amazon.com/cloudtrail/pricing/). 
IncludeManagementEvents *bool `type:"boolean"` // Specify if you want your trail to log read-only events, write-only events, @@ -3606,6 +3782,8 @@ func (s *GetEventSelectorsInput) SetTrailName(v string) *GetEventSelectorsInput type GetEventSelectorsOutput struct { _ struct{} `type:"structure"` + AdvancedEventSelectors []*AdvancedEventSelector `type:"list"` + // The event selectors that are configured for the trail. EventSelectors []*EventSelector `type:"list"` @@ -3623,6 +3801,12 @@ func (s GetEventSelectorsOutput) GoString() string { return s.String() } +// SetAdvancedEventSelectors sets the AdvancedEventSelectors field's value. +func (s *GetEventSelectorsOutput) SetAdvancedEventSelectors(v []*AdvancedEventSelector) *GetEventSelectorsOutput { + s.AdvancedEventSelectors = v + return s +} + // SetEventSelectors sets the EventSelectors field's value. func (s *GetEventSelectorsOutput) SetEventSelectors(v []*EventSelector) *GetEventSelectorsOutput { s.EventSelectors = v @@ -5496,8 +5680,9 @@ func (s *KmsKeyDisabledException) RequestID() string { return s.RespMetadata.RequestID } -// This exception is thrown when the KMS key does not exist, or when the S3 -// bucket and the KMS key are not in the same region. +// This exception is thrown when the KMS key does not exist, when the S3 bucket +// and the KMS key are not in the same region, or when the KMS key associated +// with the SNS topic either does not exist or is not in the same region. type KmsKeyNotFoundException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -6323,11 +6508,11 @@ func (s *PublicKey) SetValue(v []byte) *PublicKey { type PutEventSelectorsInput struct { _ struct{} `type:"structure"` + AdvancedEventSelectors []*AdvancedEventSelector `type:"list"` + // Specifies the settings for your event selectors. You can configure up to // five event selectors for a trail. 
- // - // EventSelectors is a required field - EventSelectors []*EventSelector `type:"list" required:"true"` + EventSelectors []*EventSelector `type:"list"` // Specifies the name of the trail or trail ARN. If you specify a trail name, // the string must meet the following requirements: @@ -6365,12 +6550,19 @@ func (s PutEventSelectorsInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *PutEventSelectorsInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "PutEventSelectorsInput"} - if s.EventSelectors == nil { - invalidParams.Add(request.NewErrParamRequired("EventSelectors")) - } if s.TrailName == nil { invalidParams.Add(request.NewErrParamRequired("TrailName")) } + if s.AdvancedEventSelectors != nil { + for i, v := range s.AdvancedEventSelectors { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AdvancedEventSelectors", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -6378,6 +6570,12 @@ func (s *PutEventSelectorsInput) Validate() error { return nil } +// SetAdvancedEventSelectors sets the AdvancedEventSelectors field's value. +func (s *PutEventSelectorsInput) SetAdvancedEventSelectors(v []*AdvancedEventSelector) *PutEventSelectorsInput { + s.AdvancedEventSelectors = v + return s +} + // SetEventSelectors sets the EventSelectors field's value. func (s *PutEventSelectorsInput) SetEventSelectors(v []*EventSelector) *PutEventSelectorsInput { s.EventSelectors = v @@ -6393,6 +6591,8 @@ func (s *PutEventSelectorsInput) SetTrailName(v string) *PutEventSelectorsInput type PutEventSelectorsOutput struct { _ struct{} `type:"structure"` + AdvancedEventSelectors []*AdvancedEventSelector `type:"list"` + // Specifies the event selectors configured for your trail. 
EventSelectors []*EventSelector `type:"list"` @@ -6413,6 +6613,12 @@ func (s PutEventSelectorsOutput) GoString() string { return s.String() } +// SetAdvancedEventSelectors sets the AdvancedEventSelectors field's value. +func (s *PutEventSelectorsOutput) SetAdvancedEventSelectors(v []*AdvancedEventSelector) *PutEventSelectorsOutput { + s.AdvancedEventSelectors = v + return s +} + // SetEventSelectors sets the EventSelectors field's value. func (s *PutEventSelectorsOutput) SetEventSelectors(v []*EventSelector) *PutEventSelectorsOutput { s.EventSelectors = v diff --git a/service/cloudtrail/errors.go b/service/cloudtrail/errors.go index 5feef21ae67..34b3cb28640 100644 --- a/service/cloudtrail/errors.go +++ b/service/cloudtrail/errors.go @@ -230,8 +230,9 @@ const ( // ErrCodeKmsKeyNotFoundException for service response error code // "KmsKeyNotFoundException". // - // This exception is thrown when the KMS key does not exist, or when the S3 - // bucket and the KMS key are not in the same region. + // This exception is thrown when the KMS key does not exist, when the S3 bucket + // and the KMS key are not in the same region, or when the KMS key associated + // with the SNS topic either does not exist or is not in the same region. ErrCodeKmsKeyNotFoundException = "KmsKeyNotFoundException" // ErrCodeMaximumNumberOfTrailsExceededException for service response error code diff --git a/service/codebuild/api.go b/service/codebuild/api.go index a336d52067e..ac056599539 100644 --- a/service/codebuild/api.go +++ b/service/codebuild/api.go @@ -1606,6 +1606,86 @@ func (c *CodeBuild) DescribeTestCasesPagesWithContext(ctx aws.Context, input *De return p.Err() } +const opGetReportGroupTrend = "GetReportGroupTrend" + +// GetReportGroupTrendRequest generates a "aws/request.Request" representing the +// client's request for the GetReportGroupTrend operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetReportGroupTrend for more information on using the GetReportGroupTrend +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetReportGroupTrendRequest method. +// req, resp := client.GetReportGroupTrendRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/GetReportGroupTrend +func (c *CodeBuild) GetReportGroupTrendRequest(input *GetReportGroupTrendInput) (req *request.Request, output *GetReportGroupTrendOutput) { + op := &request.Operation{ + Name: opGetReportGroupTrend, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetReportGroupTrendInput{} + } + + output = &GetReportGroupTrendOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetReportGroupTrend API operation for AWS CodeBuild. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeBuild's +// API operation GetReportGroupTrend for usage and error information. +// +// Returned Error Types: +// * InvalidInputException +// The input value that was provided is not valid. +// +// * ResourceNotFoundException +// The specified AWS resource cannot be found. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/GetReportGroupTrend +func (c *CodeBuild) GetReportGroupTrend(input *GetReportGroupTrendInput) (*GetReportGroupTrendOutput, error) { + req, out := c.GetReportGroupTrendRequest(input) + return out, req.Send() +} + +// GetReportGroupTrendWithContext is the same as GetReportGroupTrend with the addition of +// the ability to pass a context and additional request options. +// +// See GetReportGroupTrend for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeBuild) GetReportGroupTrendWithContext(ctx aws.Context, input *GetReportGroupTrendInput, opts ...request.Option) (*GetReportGroupTrendOutput, error) { + req, out := c.GetReportGroupTrendRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opGetResourcePolicy = "GetResourcePolicy" // GetResourcePolicyRequest generates a "aws/request.Request" representing the @@ -7875,6 +7955,98 @@ func (s *ExportedEnvironmentVariable) SetValue(v string) *ExportedEnvironmentVar return s } +type GetReportGroupTrendInput struct { + _ struct{} `type:"structure"` + + NumOfReports *int64 `locationName:"numOfReports" min:"1" type:"integer"` + + // ReportGroupArn is a required field + ReportGroupArn *string `locationName:"reportGroupArn" min:"1" type:"string" required:"true"` + + // TrendField is a required field + TrendField *string `locationName:"trendField" type:"string" required:"true" enum:"ReportGroupTrendFieldType"` +} + +// String returns the string representation +func (s GetReportGroupTrendInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetReportGroupTrendInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetReportGroupTrendInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetReportGroupTrendInput"} + if s.NumOfReports != nil && *s.NumOfReports < 1 { + invalidParams.Add(request.NewErrParamMinValue("NumOfReports", 1)) + } + if s.ReportGroupArn == nil { + invalidParams.Add(request.NewErrParamRequired("ReportGroupArn")) + } + if s.ReportGroupArn != nil && len(*s.ReportGroupArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ReportGroupArn", 1)) + } + if s.TrendField == nil { + invalidParams.Add(request.NewErrParamRequired("TrendField")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNumOfReports sets the NumOfReports field's value. +func (s *GetReportGroupTrendInput) SetNumOfReports(v int64) *GetReportGroupTrendInput { + s.NumOfReports = &v + return s +} + +// SetReportGroupArn sets the ReportGroupArn field's value. 
+func (s *GetReportGroupTrendInput) SetReportGroupArn(v string) *GetReportGroupTrendInput { + s.ReportGroupArn = &v + return s +} + +// SetTrendField sets the TrendField field's value. +func (s *GetReportGroupTrendInput) SetTrendField(v string) *GetReportGroupTrendInput { + s.TrendField = &v + return s +} + +type GetReportGroupTrendOutput struct { + _ struct{} `type:"structure"` + + RawData []*ReportWithRawData `locationName:"rawData" type:"list"` + + Stats *ReportGroupTrendStats `locationName:"stats" type:"structure"` +} + +// String returns the string representation +func (s GetReportGroupTrendOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetReportGroupTrendOutput) GoString() string { + return s.String() +} + +// SetRawData sets the RawData field's value. +func (s *GetReportGroupTrendOutput) SetRawData(v []*ReportWithRawData) *GetReportGroupTrendOutput { + s.RawData = v + return s +} + +// SetStats sets the Stats field's value. +func (s *GetReportGroupTrendOutput) SetStats(v *ReportGroupTrendStats) *GetReportGroupTrendOutput { + s.Stats = v + return s +} + type GetResourcePolicyInput struct { _ struct{} `type:"structure"` @@ -11397,6 +11569,74 @@ func (s *ReportGroup) SetType(v string) *ReportGroup { return s } +type ReportGroupTrendStats struct { + _ struct{} `type:"structure"` + + Average *string `locationName:"average" type:"string"` + + Max *string `locationName:"max" type:"string"` + + Min *string `locationName:"min" type:"string"` +} + +// String returns the string representation +func (s ReportGroupTrendStats) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReportGroupTrendStats) GoString() string { + return s.String() +} + +// SetAverage sets the Average field's value. +func (s *ReportGroupTrendStats) SetAverage(v string) *ReportGroupTrendStats { + s.Average = &v + return s +} + +// SetMax sets the Max field's value. 
+func (s *ReportGroupTrendStats) SetMax(v string) *ReportGroupTrendStats { + s.Max = &v + return s +} + +// SetMin sets the Min field's value. +func (s *ReportGroupTrendStats) SetMin(v string) *ReportGroupTrendStats { + s.Min = &v + return s +} + +type ReportWithRawData struct { + _ struct{} `type:"structure"` + + Data *string `locationName:"data" type:"string"` + + ReportArn *string `locationName:"reportArn" min:"1" type:"string"` +} + +// String returns the string representation +func (s ReportWithRawData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReportWithRawData) GoString() string { + return s.String() +} + +// SetData sets the Data field's value. +func (s *ReportWithRawData) SetData(v string) *ReportWithRawData { + s.Data = &v + return s +} + +// SetReportArn sets the ReportArn field's value. +func (s *ReportWithRawData) SetReportArn(v string) *ReportWithRawData { + s.ReportArn = &v + return s +} + // Represents a resolved build artifact. A resolve artifact is an artifact that // is built and deployed to the destination, such as Amazon Simple Storage Service // (Amazon S3). 
@@ -14672,6 +14912,50 @@ func ReportGroupStatusType_Values() []string { } } +const ( + // ReportGroupTrendFieldTypePassRate is a ReportGroupTrendFieldType enum value + ReportGroupTrendFieldTypePassRate = "PASS_RATE" + + // ReportGroupTrendFieldTypeDuration is a ReportGroupTrendFieldType enum value + ReportGroupTrendFieldTypeDuration = "DURATION" + + // ReportGroupTrendFieldTypeTotal is a ReportGroupTrendFieldType enum value + ReportGroupTrendFieldTypeTotal = "TOTAL" + + // ReportGroupTrendFieldTypeLineCoverage is a ReportGroupTrendFieldType enum value + ReportGroupTrendFieldTypeLineCoverage = "LINE_COVERAGE" + + // ReportGroupTrendFieldTypeLinesCovered is a ReportGroupTrendFieldType enum value + ReportGroupTrendFieldTypeLinesCovered = "LINES_COVERED" + + // ReportGroupTrendFieldTypeLinesMissed is a ReportGroupTrendFieldType enum value + ReportGroupTrendFieldTypeLinesMissed = "LINES_MISSED" + + // ReportGroupTrendFieldTypeBranchCoverage is a ReportGroupTrendFieldType enum value + ReportGroupTrendFieldTypeBranchCoverage = "BRANCH_COVERAGE" + + // ReportGroupTrendFieldTypeBranchesCovered is a ReportGroupTrendFieldType enum value + ReportGroupTrendFieldTypeBranchesCovered = "BRANCHES_COVERED" + + // ReportGroupTrendFieldTypeBranchesMissed is a ReportGroupTrendFieldType enum value + ReportGroupTrendFieldTypeBranchesMissed = "BRANCHES_MISSED" +) + +// ReportGroupTrendFieldType_Values returns all elements of the ReportGroupTrendFieldType enum +func ReportGroupTrendFieldType_Values() []string { + return []string{ + ReportGroupTrendFieldTypePassRate, + ReportGroupTrendFieldTypeDuration, + ReportGroupTrendFieldTypeTotal, + ReportGroupTrendFieldTypeLineCoverage, + ReportGroupTrendFieldTypeLinesCovered, + ReportGroupTrendFieldTypeLinesMissed, + ReportGroupTrendFieldTypeBranchCoverage, + ReportGroupTrendFieldTypeBranchesCovered, + ReportGroupTrendFieldTypeBranchesMissed, + } +} + const ( // ReportPackagingTypeZip is a ReportPackagingType enum value ReportPackagingTypeZip = 
"ZIP" diff --git a/service/codebuild/codebuildiface/interface.go b/service/codebuild/codebuildiface/interface.go index 44883edd43e..5cd1bf39e34 100644 --- a/service/codebuild/codebuildiface/interface.go +++ b/service/codebuild/codebuildiface/interface.go @@ -138,6 +138,10 @@ type CodeBuildAPI interface { DescribeTestCasesPages(*codebuild.DescribeTestCasesInput, func(*codebuild.DescribeTestCasesOutput, bool) bool) error DescribeTestCasesPagesWithContext(aws.Context, *codebuild.DescribeTestCasesInput, func(*codebuild.DescribeTestCasesOutput, bool) bool, ...request.Option) error + GetReportGroupTrend(*codebuild.GetReportGroupTrendInput) (*codebuild.GetReportGroupTrendOutput, error) + GetReportGroupTrendWithContext(aws.Context, *codebuild.GetReportGroupTrendInput, ...request.Option) (*codebuild.GetReportGroupTrendOutput, error) + GetReportGroupTrendRequest(*codebuild.GetReportGroupTrendInput) (*request.Request, *codebuild.GetReportGroupTrendOutput) + GetResourcePolicy(*codebuild.GetResourcePolicyInput) (*codebuild.GetResourcePolicyOutput, error) GetResourcePolicyWithContext(aws.Context, *codebuild.GetResourcePolicyInput, ...request.Option) (*codebuild.GetResourcePolicyOutput, error) GetResourcePolicyRequest(*codebuild.GetResourcePolicyInput) (*request.Request, *codebuild.GetResourcePolicyOutput) diff --git a/service/cognitoidentityprovider/api.go b/service/cognitoidentityprovider/api.go index 86c0162ec51..4a0e782d835 100644 --- a/service/cognitoidentityprovider/api.go +++ b/service/cognitoidentityprovider/api.go @@ -9217,7 +9217,11 @@ func (c *CognitoIdentityProvider) SetUserMFAPreferenceRequest(input *SetUserMFAP // be set as preferred. The preferred MFA factor will be used to authenticate // a user if multiple factors are enabled. If multiple options are enabled and // no preference is set, a challenge to choose an MFA option will be returned -// during sign in. +// during sign in. 
If an MFA type is enabled for a user, the user will be prompted +// for MFA during all sign in attempts, unless device tracking is turned on +// and the device has been trusted. If you would like MFA to be applied selectively +// based on the assessed risk level of sign in attempts, disable MFA for users +// and turn on Adaptive Authentication for the user pool. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -17208,10 +17212,14 @@ type CreateUserPoolInput struct { // The email configuration. EmailConfiguration *EmailConfigurationType `type:"structure"` - // A string representing the email verification message. + // A string representing the email verification message. EmailVerificationMessage + // is allowed only if EmailSendingAccount (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_EmailConfigurationType.html#CognitoUserPools-Type-EmailConfigurationType-EmailSendingAccount) + // is DEVELOPER. EmailVerificationMessage *string `min:"6" type:"string"` - // A string representing the email verification subject. + // A string representing the email verification subject. EmailVerificationSubject + // is allowed only if EmailSendingAccount (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_EmailConfigurationType.html#CognitoUserPools-Type-EmailConfigurationType-EmailSendingAccount) + // is DEVELOPER. EmailVerificationSubject *string `min:"1" type:"string"` // The Lambda trigger configuration information for the new user pool. @@ -17566,6 +17574,124 @@ func (s *CustomDomainConfigType) SetCertificateArn(v string) *CustomDomainConfig return s } +// A custom email sender Lambda configuration type. 
+type CustomEmailLambdaVersionConfigType struct { + _ struct{} `type:"structure"` + + // The Lambda Amazon Resource Name of the Lambda function that Amazon Cognito + // triggers to send email notifications to users. + // + // LambdaArn is a required field + LambdaArn *string `min:"20" type:"string" required:"true"` + + // The Lambda version represents the signature of the "request" attribute in + // the "event" information Amazon Cognito passes to your custom email Lambda + // function. The only supported value is V1_0. + // + // LambdaVersion is a required field + LambdaVersion *string `type:"string" required:"true" enum:"CustomEmailSenderLambdaVersionType"` +} + +// String returns the string representation +func (s CustomEmailLambdaVersionConfigType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomEmailLambdaVersionConfigType) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CustomEmailLambdaVersionConfigType) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CustomEmailLambdaVersionConfigType"} + if s.LambdaArn == nil { + invalidParams.Add(request.NewErrParamRequired("LambdaArn")) + } + if s.LambdaArn != nil && len(*s.LambdaArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("LambdaArn", 20)) + } + if s.LambdaVersion == nil { + invalidParams.Add(request.NewErrParamRequired("LambdaVersion")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLambdaArn sets the LambdaArn field's value. +func (s *CustomEmailLambdaVersionConfigType) SetLambdaArn(v string) *CustomEmailLambdaVersionConfigType { + s.LambdaArn = &v + return s +} + +// SetLambdaVersion sets the LambdaVersion field's value. 
+func (s *CustomEmailLambdaVersionConfigType) SetLambdaVersion(v string) *CustomEmailLambdaVersionConfigType { + s.LambdaVersion = &v + return s +} + +// A custom SMS sender Lambda configuration type. +type CustomSMSLambdaVersionConfigType struct { + _ struct{} `type:"structure"` + + // The Lambda Amazon Resource Name of the Lambda function that Amazon Cognito + // triggers to send SMS notifications to users. + // + // LambdaArn is a required field + LambdaArn *string `min:"20" type:"string" required:"true"` + + // The Lambda version represents the signature of the "request" attribute in + // the "event" information Amazon Cognito passes to your custom SMS Lambda function. + // The only supported value is V1_0. + // + // LambdaVersion is a required field + LambdaVersion *string `type:"string" required:"true" enum:"CustomSMSSenderLambdaVersionType"` +} + +// String returns the string representation +func (s CustomSMSLambdaVersionConfigType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomSMSLambdaVersionConfigType) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CustomSMSLambdaVersionConfigType) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CustomSMSLambdaVersionConfigType"} + if s.LambdaArn == nil { + invalidParams.Add(request.NewErrParamRequired("LambdaArn")) + } + if s.LambdaArn != nil && len(*s.LambdaArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("LambdaArn", 20)) + } + if s.LambdaVersion == nil { + invalidParams.Add(request.NewErrParamRequired("LambdaVersion")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLambdaArn sets the LambdaArn field's value. 
+func (s *CustomSMSLambdaVersionConfigType) SetLambdaArn(v string) *CustomSMSLambdaVersionConfigType { + s.LambdaArn = &v + return s +} + +// SetLambdaVersion sets the LambdaVersion field's value. +func (s *CustomSMSLambdaVersionConfigType) SetLambdaVersion(v string) *CustomSMSLambdaVersionConfigType { + s.LambdaVersion = &v + return s +} + type DeleteGroupInput struct { _ struct{} `type:"structure"` @@ -18922,6 +19048,10 @@ func (s *DuplicateProviderException) RequestID() string { } // The email configuration type. +// +// Amazon Cognito has specific regions for use with Amazon SES. For more information +// on the supported regions, see Email Settings for Amazon Cognito User Pools +// (https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-email.html). type EmailConfigurationType struct { _ struct{} `type:"structure"` @@ -18962,6 +19092,27 @@ type EmailConfigurationType struct { // the FROM address, provide the ARN of an Amazon SES verified email address // for the SourceArn parameter. // + // If EmailSendingAccount is COGNITO_DEFAULT, the following parameters aren't + // allowed: + // + // * EmailVerificationMessage + // + // * EmailVerificationSubject + // + // * InviteMessageTemplate.EmailMessage + // + // * InviteMessageTemplate.EmailSubject + // + // * VerificationMessageTemplate.EmailMessage + // + // * VerificationMessageTemplate.EmailMessageByLink + // + // * VerificationMessageTemplate.EmailSubject, + // + // * VerificationMessageTemplate.EmailSubjectByLink + // + // DEVELOPER EmailSendingAccount is required. + // // DEVELOPER // // When Amazon Cognito emails your users, it uses your Amazon SES configuration. @@ -21479,12 +21630,23 @@ type LambdaConfigType struct { // Creates an authentication challenge. CreateAuthChallenge *string `min:"20" type:"string"` + // A custom email sender AWS Lambda trigger. + CustomEmailSender *CustomEmailLambdaVersionConfigType `type:"structure"` + // A custom Message AWS Lambda trigger. 
CustomMessage *string `min:"20" type:"string"` + // A custom SMS sender AWS Lambda trigger. + CustomSMSSender *CustomSMSLambdaVersionConfigType `type:"structure"` + // Defines the authentication challenge. DefineAuthChallenge *string `min:"20" type:"string"` + // The Amazon Resource Name of Key Management Service Customer master keys (/kms/latest/developerguide/concepts.html#master_keys) + // . Amazon Cognito uses the key to encrypt codes and temporary passwords sent + // to CustomEmailSender and CustomSMSSender. + KMSKeyID *string `min:"20" type:"string"` + // A post-authentication AWS Lambda trigger. PostAuthentication *string `min:"20" type:"string"` @@ -21529,6 +21691,9 @@ func (s *LambdaConfigType) Validate() error { if s.DefineAuthChallenge != nil && len(*s.DefineAuthChallenge) < 20 { invalidParams.Add(request.NewErrParamMinLen("DefineAuthChallenge", 20)) } + if s.KMSKeyID != nil && len(*s.KMSKeyID) < 20 { + invalidParams.Add(request.NewErrParamMinLen("KMSKeyID", 20)) + } if s.PostAuthentication != nil && len(*s.PostAuthentication) < 20 { invalidParams.Add(request.NewErrParamMinLen("PostAuthentication", 20)) } @@ -21550,6 +21715,16 @@ func (s *LambdaConfigType) Validate() error { if s.VerifyAuthChallengeResponse != nil && len(*s.VerifyAuthChallengeResponse) < 20 { invalidParams.Add(request.NewErrParamMinLen("VerifyAuthChallengeResponse", 20)) } + if s.CustomEmailSender != nil { + if err := s.CustomEmailSender.Validate(); err != nil { + invalidParams.AddNested("CustomEmailSender", err.(request.ErrInvalidParams)) + } + } + if s.CustomSMSSender != nil { + if err := s.CustomSMSSender.Validate(); err != nil { + invalidParams.AddNested("CustomSMSSender", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -21563,18 +21738,36 @@ func (s *LambdaConfigType) SetCreateAuthChallenge(v string) *LambdaConfigType { return s } +// SetCustomEmailSender sets the CustomEmailSender field's value. 
+func (s *LambdaConfigType) SetCustomEmailSender(v *CustomEmailLambdaVersionConfigType) *LambdaConfigType { + s.CustomEmailSender = v + return s +} + // SetCustomMessage sets the CustomMessage field's value. func (s *LambdaConfigType) SetCustomMessage(v string) *LambdaConfigType { s.CustomMessage = &v return s } +// SetCustomSMSSender sets the CustomSMSSender field's value. +func (s *LambdaConfigType) SetCustomSMSSender(v *CustomSMSLambdaVersionConfigType) *LambdaConfigType { + s.CustomSMSSender = v + return s +} + // SetDefineAuthChallenge sets the DefineAuthChallenge field's value. func (s *LambdaConfigType) SetDefineAuthChallenge(v string) *LambdaConfigType { s.DefineAuthChallenge = &v return s } +// SetKMSKeyID sets the KMSKeyID field's value. +func (s *LambdaConfigType) SetKMSKeyID(v string) *LambdaConfigType { + s.KMSKeyID = &v + return s +} + // SetPostAuthentication sets the PostAuthentication field's value. func (s *LambdaConfigType) SetPostAuthentication(v string) *LambdaConfigType { s.PostAuthentication = &v @@ -22805,10 +22998,14 @@ func (s *MFAOptionType) SetDeliveryMedium(v string) *MFAOptionType { type MessageTemplateType struct { _ struct{} `type:"structure"` - // The message template for email messages. + // The message template for email messages. EmailMessage is allowed only if + // EmailSendingAccount (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_EmailConfigurationType.html#CognitoUserPools-Type-EmailConfigurationType-EmailSendingAccount) + // is DEVELOPER. EmailMessage *string `min:"6" type:"string"` - // The subject line for email messages. + // The subject line for email messages. EmailSubject is allowed only if EmailSendingAccount + // (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_EmailConfigurationType.html#CognitoUserPools-Type-EmailConfigurationType-EmailSendingAccount) + // is DEVELOPER. 
EmailSubject *string `min:"1" type:"string"` // The message template for SMS messages. @@ -24179,11 +24376,19 @@ func (s *RiskExceptionConfigurationType) SetSkippedIPRangeList(v []*string) *Ris return s } -// The type used for enabling SMS MFA at the user level. +// The type used for enabling SMS MFA at the user level. Phone numbers don't +// need to be verified to be used for SMS MFA. If an MFA type is enabled for +// a user, the user will be prompted for MFA during all sign in attempts, unless +// device tracking is turned on and the device has been trusted. If you would +// like MFA to be applied selectively based on the assessed risk level of sign +// in attempts, disable MFA for users and turn on Adaptive Authentication for +// the user pool. type SMSMfaSettingsType struct { _ struct{} `type:"structure"` - // Specifies whether SMS text message MFA is enabled. + // Specifies whether SMS text message MFA is enabled. If an MFA type is enabled + // for a user, the user will be prompted for MFA during all sign in attempts, + // unless device tracking is turned on and the device has been trusted. Enabled *bool `type:"boolean"` // Specifies whether SMS is the preferred MFA method. @@ -25125,7 +25330,8 @@ type SmsConfigurationType struct { // The Amazon Resource Name (ARN) of the Amazon Simple Notification Service // (SNS) caller. This is the ARN of the IAM role in your AWS account which Cognito - // will use to send SMS messages. + // will use to send SMS messages. SMS messages are subject to a spending limit + // (https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-email-phone-verification.html). // // SnsCallerArn is a required field SnsCallerArn *string `min:"20" type:"string" required:"true"` @@ -25304,11 +25510,18 @@ func (s *SoftwareTokenMfaConfigType) SetEnabled(v bool) *SoftwareTokenMfaConfigT return s } -// The type used for enabling software token MFA at the user level. 
+// The type used for enabling software token MFA at the user level. If an MFA +// type is enabled for a user, the user will be prompted for MFA during all +// sign in attempts, unless device tracking is turned on and the device has +// been trusted. If you would like MFA to be applied selectively based on the +// assessed risk level of sign in attempts, disable MFA for users and turn on +// Adaptive Authentication for the user pool. type SoftwareTokenMfaSettingsType struct { _ struct{} `type:"structure"` - // Specifies whether software token MFA is enabled. + // Specifies whether software token MFA is enabled. If an MFA type is enabled + // for a user, the user will be prompted for MFA during all sign in attempts, + // unless device tracking is turned on and the device has been trusted. Enabled *bool `type:"boolean"` // Specifies whether software token MFA is the preferred MFA method. @@ -29043,17 +29256,25 @@ type VerificationMessageTemplateType struct { // The default email option. DefaultEmailOption *string `type:"string" enum:"DefaultEmailOptionType"` - // The email message template. + // The email message template. EmailMessage is allowed only if EmailSendingAccount + // (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_EmailConfigurationType.html#CognitoUserPools-Type-EmailConfigurationType-EmailSendingAccount) + // is DEVELOPER. EmailMessage *string `min:"6" type:"string"` - // The email message template for sending a confirmation link to the user. + // The email message template for sending a confirmation link to the user. EmailMessageByLink + // is allowed only if EmailSendingAccount (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_EmailConfigurationType.html#CognitoUserPools-Type-EmailConfigurationType-EmailSendingAccount) + // is DEVELOPER. EmailMessageByLink *string `min:"6" type:"string"` - // The subject line for the email message template. 
+ // The subject line for the email message template. EmailSubject is allowed + // only if EmailSendingAccount (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_EmailConfigurationType.html#CognitoUserPools-Type-EmailConfigurationType-EmailSendingAccount) + // is DEVELOPER. EmailSubject *string `min:"1" type:"string"` // The subject line for the email message template for sending a confirmation - // link to the user. + // link to the user. EmailSubjectByLink is allowed only EmailSendingAccount + // (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_EmailConfigurationType.html#CognitoUserPools-Type-EmailConfigurationType-EmailSendingAccount) + // is DEVELOPER. EmailSubjectByLink *string `min:"1" type:"string"` // The SMS message template. @@ -29546,6 +29767,30 @@ func CompromisedCredentialsEventActionType_Values() []string { } } +const ( + // CustomEmailSenderLambdaVersionTypeV10 is a CustomEmailSenderLambdaVersionType enum value + CustomEmailSenderLambdaVersionTypeV10 = "V1_0" +) + +// CustomEmailSenderLambdaVersionType_Values returns all elements of the CustomEmailSenderLambdaVersionType enum +func CustomEmailSenderLambdaVersionType_Values() []string { + return []string{ + CustomEmailSenderLambdaVersionTypeV10, + } +} + +const ( + // CustomSMSSenderLambdaVersionTypeV10 is a CustomSMSSenderLambdaVersionType enum value + CustomSMSSenderLambdaVersionTypeV10 = "V1_0" +) + +// CustomSMSSenderLambdaVersionType_Values returns all elements of the CustomSMSSenderLambdaVersionType enum +func CustomSMSSenderLambdaVersionType_Values() []string { + return []string{ + CustomSMSSenderLambdaVersionTypeV10, + } +} + const ( // DefaultEmailOptionTypeConfirmWithLink is a DefaultEmailOptionType enum value DefaultEmailOptionTypeConfirmWithLink = "CONFIRM_WITH_LINK" diff --git a/service/comprehend/api.go b/service/comprehend/api.go index f76ee8f111e..8de97801eca 100644 --- a/service/comprehend/api.go +++ 
b/service/comprehend/api.go @@ -1756,6 +1756,94 @@ func (c *Comprehend) DescribeEntityRecognizerWithContext(ctx aws.Context, input return out, req.Send() } +const opDescribeEventsDetectionJob = "DescribeEventsDetectionJob" + +// DescribeEventsDetectionJobRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEventsDetectionJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeEventsDetectionJob for more information on using the DescribeEventsDetectionJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeEventsDetectionJobRequest method. +// req, resp := client.DescribeEventsDetectionJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/DescribeEventsDetectionJob +func (c *Comprehend) DescribeEventsDetectionJobRequest(input *DescribeEventsDetectionJobInput) (req *request.Request, output *DescribeEventsDetectionJobOutput) { + op := &request.Operation{ + Name: opDescribeEventsDetectionJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeEventsDetectionJobInput{} + } + + output = &DescribeEventsDetectionJobOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeEventsDetectionJob API operation for Amazon Comprehend. +// +// Gets the status and details of an events detection job. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Comprehend's +// API operation DescribeEventsDetectionJob for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// The request is invalid. +// +// * JobNotFoundException +// The specified job was not found. Check the job ID and try again. +// +// * TooManyRequestsException +// The number of requests exceeds the limit. Resubmit your request later. +// +// * InternalServerException +// An internal server error occurred. Retry your request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/DescribeEventsDetectionJob +func (c *Comprehend) DescribeEventsDetectionJob(input *DescribeEventsDetectionJobInput) (*DescribeEventsDetectionJobOutput, error) { + req, out := c.DescribeEventsDetectionJobRequest(input) + return out, req.Send() +} + +// DescribeEventsDetectionJobWithContext is the same as DescribeEventsDetectionJob with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeEventsDetectionJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Comprehend) DescribeEventsDetectionJobWithContext(ctx aws.Context, input *DescribeEventsDetectionJobInput, opts ...request.Option) (*DescribeEventsDetectionJobOutput, error) { + req, out := c.DescribeEventsDetectionJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opDescribeKeyPhrasesDetectionJob = "DescribeKeyPhrasesDetectionJob" // DescribeKeyPhrasesDetectionJobRequest generates a "aws/request.Request" representing the @@ -3489,6 +3577,152 @@ func (c *Comprehend) ListEntityRecognizersPagesWithContext(ctx aws.Context, inpu return p.Err() } +const opListEventsDetectionJobs = "ListEventsDetectionJobs" + +// ListEventsDetectionJobsRequest generates a "aws/request.Request" representing the +// client's request for the ListEventsDetectionJobs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListEventsDetectionJobs for more information on using the ListEventsDetectionJobs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListEventsDetectionJobsRequest method. 
+// req, resp := client.ListEventsDetectionJobsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/ListEventsDetectionJobs +func (c *Comprehend) ListEventsDetectionJobsRequest(input *ListEventsDetectionJobsInput) (req *request.Request, output *ListEventsDetectionJobsOutput) { + op := &request.Operation{ + Name: opListEventsDetectionJobs, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListEventsDetectionJobsInput{} + } + + output = &ListEventsDetectionJobsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListEventsDetectionJobs API operation for Amazon Comprehend. +// +// Gets a list of the events detection jobs that you have submitted. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Comprehend's +// API operation ListEventsDetectionJobs for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// The request is invalid. +// +// * TooManyRequestsException +// The number of requests exceeds the limit. Resubmit your request later. +// +// * InvalidFilterException +// The filter specified for the operation is invalid. Specify a different filter. +// +// * InternalServerException +// An internal server error occurred. Retry your request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/ListEventsDetectionJobs +func (c *Comprehend) ListEventsDetectionJobs(input *ListEventsDetectionJobsInput) (*ListEventsDetectionJobsOutput, error) { + req, out := c.ListEventsDetectionJobsRequest(input) + return out, req.Send() +} + +// ListEventsDetectionJobsWithContext is the same as ListEventsDetectionJobs with the addition of +// the ability to pass a context and additional request options. +// +// See ListEventsDetectionJobs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Comprehend) ListEventsDetectionJobsWithContext(ctx aws.Context, input *ListEventsDetectionJobsInput, opts ...request.Option) (*ListEventsDetectionJobsOutput, error) { + req, out := c.ListEventsDetectionJobsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListEventsDetectionJobsPages iterates over the pages of a ListEventsDetectionJobs operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListEventsDetectionJobs method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListEventsDetectionJobs operation. 
+// pageNum := 0 +// err := client.ListEventsDetectionJobsPages(params, +// func(page *comprehend.ListEventsDetectionJobsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Comprehend) ListEventsDetectionJobsPages(input *ListEventsDetectionJobsInput, fn func(*ListEventsDetectionJobsOutput, bool) bool) error { + return c.ListEventsDetectionJobsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListEventsDetectionJobsPagesWithContext same as ListEventsDetectionJobsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Comprehend) ListEventsDetectionJobsPagesWithContext(ctx aws.Context, input *ListEventsDetectionJobsInput, fn func(*ListEventsDetectionJobsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListEventsDetectionJobsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListEventsDetectionJobsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListEventsDetectionJobsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListKeyPhrasesDetectionJobs = "ListKeyPhrasesDetectionJobs" // ListKeyPhrasesDetectionJobsRequest generates a "aws/request.Request" representing the @@ -4392,6 +4626,95 @@ func (c *Comprehend) StartEntitiesDetectionJobWithContext(ctx aws.Context, input return out, req.Send() } +const opStartEventsDetectionJob = "StartEventsDetectionJob" + +// StartEventsDetectionJobRequest generates a "aws/request.Request" representing the +// client's request for the StartEventsDetectionJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartEventsDetectionJob for more information on using the StartEventsDetectionJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartEventsDetectionJobRequest method. 
+// req, resp := client.StartEventsDetectionJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/StartEventsDetectionJob +func (c *Comprehend) StartEventsDetectionJobRequest(input *StartEventsDetectionJobInput) (req *request.Request, output *StartEventsDetectionJobOutput) { + op := &request.Operation{ + Name: opStartEventsDetectionJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartEventsDetectionJobInput{} + } + + output = &StartEventsDetectionJobOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartEventsDetectionJob API operation for Amazon Comprehend. +// +// Starts an asynchronous event detection job for a collection of documents. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Comprehend's +// API operation StartEventsDetectionJob for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// The request is invalid. +// +// * TooManyRequestsException +// The number of requests exceeds the limit. Resubmit your request later. +// +// * KmsKeyValidationException +// The KMS customer managed key (CMK) entered cannot be validated. Verify the +// key and re-enter it. +// +// * InternalServerException +// An internal server error occurred. Retry your request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/StartEventsDetectionJob +func (c *Comprehend) StartEventsDetectionJob(input *StartEventsDetectionJobInput) (*StartEventsDetectionJobOutput, error) { + req, out := c.StartEventsDetectionJobRequest(input) + return out, req.Send() +} + +// StartEventsDetectionJobWithContext is the same as StartEventsDetectionJob with the addition of +// the ability to pass a context and additional request options. +// +// See StartEventsDetectionJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Comprehend) StartEventsDetectionJobWithContext(ctx aws.Context, input *StartEventsDetectionJobInput, opts ...request.Option) (*StartEventsDetectionJobOutput, error) { + req, out := c.StartEventsDetectionJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opStartKeyPhrasesDetectionJob = "StartKeyPhrasesDetectionJob" // StartKeyPhrasesDetectionJobRequest generates a "aws/request.Request" representing the @@ -4943,33 +5266,118 @@ func (c *Comprehend) StopEntitiesDetectionJobWithContext(ctx aws.Context, input return out, req.Send() } -const opStopKeyPhrasesDetectionJob = "StopKeyPhrasesDetectionJob" +const opStopEventsDetectionJob = "StopEventsDetectionJob" -// StopKeyPhrasesDetectionJobRequest generates a "aws/request.Request" representing the -// client's request for the StopKeyPhrasesDetectionJob operation. The "output" return +// StopEventsDetectionJobRequest generates a "aws/request.Request" representing the +// client's request for the StopEventsDetectionJob operation. 
The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See StopKeyPhrasesDetectionJob for more information on using the StopKeyPhrasesDetectionJob +// See StopEventsDetectionJob for more information on using the StopEventsDetectionJob // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the StopKeyPhrasesDetectionJobRequest method. -// req, resp := client.StopKeyPhrasesDetectionJobRequest(params) +// // Example sending a request using the StopEventsDetectionJobRequest method. +// req, resp := client.StopEventsDetectionJobRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/StopKeyPhrasesDetectionJob -func (c *Comprehend) StopKeyPhrasesDetectionJobRequest(input *StopKeyPhrasesDetectionJobInput) (req *request.Request, output *StopKeyPhrasesDetectionJobOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/StopEventsDetectionJob +func (c *Comprehend) StopEventsDetectionJobRequest(input *StopEventsDetectionJobInput) (req *request.Request, output *StopEventsDetectionJobOutput) { + op := &request.Operation{ + Name: opStopEventsDetectionJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopEventsDetectionJobInput{} + } + + output = &StopEventsDetectionJobOutput{} + req = c.newRequest(op, input, output) + return +} + +// StopEventsDetectionJob API operation for Amazon Comprehend. +// +// Stops an events detection job in progress. 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Comprehend's +// API operation StopEventsDetectionJob for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// The request is invalid. +// +// * JobNotFoundException +// The specified job was not found. Check the job ID and try again. +// +// * InternalServerException +// An internal server error occurred. Retry your request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/StopEventsDetectionJob +func (c *Comprehend) StopEventsDetectionJob(input *StopEventsDetectionJobInput) (*StopEventsDetectionJobOutput, error) { + req, out := c.StopEventsDetectionJobRequest(input) + return out, req.Send() +} + +// StopEventsDetectionJobWithContext is the same as StopEventsDetectionJob with the addition of +// the ability to pass a context and additional request options. +// +// See StopEventsDetectionJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Comprehend) StopEventsDetectionJobWithContext(ctx aws.Context, input *StopEventsDetectionJobInput, opts ...request.Option) (*StopEventsDetectionJobOutput, error) { + req, out := c.StopEventsDetectionJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStopKeyPhrasesDetectionJob = "StopKeyPhrasesDetectionJob" + +// StopKeyPhrasesDetectionJobRequest generates a "aws/request.Request" representing the +// client's request for the StopKeyPhrasesDetectionJob operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StopKeyPhrasesDetectionJob for more information on using the StopKeyPhrasesDetectionJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StopKeyPhrasesDetectionJobRequest method. +// req, resp := client.StopKeyPhrasesDetectionJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/StopKeyPhrasesDetectionJob +func (c *Comprehend) StopKeyPhrasesDetectionJobRequest(input *StopKeyPhrasesDetectionJobInput) (req *request.Request, output *StopKeyPhrasesDetectionJobOutput) { op := &request.Operation{ Name: opStopKeyPhrasesDetectionJob, HTTPMethod: "POST", @@ -6757,7 +7165,7 @@ type ClassifyDocumentOutput struct { // The labels used the document being analyzed. These are used for multi-label // trained models. Individual labels represent different categories that are - // related in some manner and are not multually exclusive. For example, a movie + // related in some manner and are not mutually exclusive. For example, a movie // can be just an action movie, or it can be an action movie, a science fiction // movie, and a comedy, all at the same time. Labels []*DocumentLabel `type:"list"` @@ -7901,6 +8309,71 @@ func (s *DescribeEntityRecognizerOutput) SetEntityRecognizerProperties(v *Entity return s } +type DescribeEventsDetectionJobInput struct { + _ struct{} `type:"structure"` + + // The identifier of the events detection job. 
+ // + // JobId is a required field + JobId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeEventsDetectionJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventsDetectionJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeEventsDetectionJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeEventsDetectionJobInput"} + if s.JobId == nil { + invalidParams.Add(request.NewErrParamRequired("JobId")) + } + if s.JobId != nil && len(*s.JobId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetJobId sets the JobId field's value. +func (s *DescribeEventsDetectionJobInput) SetJobId(v string) *DescribeEventsDetectionJobInput { + s.JobId = &v + return s +} + +type DescribeEventsDetectionJobOutput struct { + _ struct{} `type:"structure"` + + // An object that contains the properties associated with an event detection + // job. + EventsDetectionJobProperties *EventsDetectionJobProperties `type:"structure"` +} + +// String returns the string representation +func (s DescribeEventsDetectionJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventsDetectionJobOutput) GoString() string { + return s.String() +} + +// SetEventsDetectionJobProperties sets the EventsDetectionJobProperties field's value. 
+func (s *DescribeEventsDetectionJobOutput) SetEventsDetectionJobProperties(v *EventsDetectionJobProperties) *DescribeEventsDetectionJobOutput { + s.EventsDetectionJobProperties = v + return s +} + type DescribeKeyPhrasesDetectionJobInput struct { _ struct{} `type:"structure"` @@ -9555,7 +10028,7 @@ func (s *DominantLanguageDetectionJobProperties) SetVpcConfig(v *VpcConfig) *Dom return s } -// The filter used to determine which endpoints are are returned. You can filter +// The filter used to determine which endpoints are returned. You can filter // jobs on their name, model, status, or the date and time that they were created. // You can only set one filter at a time. type EndpointFilter struct { @@ -10599,7 +11072,7 @@ func (s *EntityRecognizerProperties) SetVpcConfig(v *VpcConfig) *EntityRecognize type EntityTypesEvaluationMetrics struct { _ struct{} `type:"structure"` - // A measure of how accurate the recognizer results are for for a specific entity + // A measure of how accurate the recognizer results are for a specific entity // type in the test data. It is derived from the Precision and Recall values. // The F1Score is the harmonic average of the two scores. The highest score // is 1, and the worst score is 0. @@ -10689,48 +11162,43 @@ func (s *EntityTypesListItem) SetType(v string) *EntityTypesListItem { return s } -// The input properties for a topic detection job. -type InputDataConfig struct { +// Provides information for filtering a list of event detection jobs. +type EventsDetectionJobFilter struct { _ struct{} `type:"structure"` - // Specifies how the text in an input file should be processed: - // - // * ONE_DOC_PER_FILE - Each file is considered a separate document. Use - // this option when you are processing large documents, such as newspaper - // articles or scientific papers. - // - // * ONE_DOC_PER_LINE - Each line in a file is considered a separate document. 
- // Use this option when you are processing many short documents, such as - // text messages. - InputFormat *string `type:"string" enum:"InputFormat"` + // Filters on the name of the events detection job. + JobName *string `min:"1" type:"string"` - // The Amazon S3 URI for the input data. The URI must be in same region as the - // API endpoint that you are calling. The URI can point to a single input file - // or it can provide the prefix for a collection of data files. - // - // For example, if you use the URI S3://bucketName/prefix, if the prefix is - // a single file, Amazon Comprehend uses that file as input. If more than one - // file begins with the prefix, Amazon Comprehend uses all of them as input. - // - // S3Uri is a required field - S3Uri *string `type:"string" required:"true"` + // Filters the list of jobs based on job status. Returns only jobs with the + // specified status. + JobStatus *string `type:"string" enum:"JobStatus"` + + // Filters the list of jobs based on the time that the job was submitted for + // processing. Returns only jobs submitted after the specified time. Jobs are + // returned in descending order, newest to oldest. + SubmitTimeAfter *time.Time `type:"timestamp"` + + // Filters the list of jobs based on the time that the job was submitted for + // processing. Returns only jobs submitted before the specified time. Jobs are + // returned in ascending order, oldest to newest. + SubmitTimeBefore *time.Time `type:"timestamp"` } // String returns the string representation -func (s InputDataConfig) String() string { +func (s EventsDetectionJobFilter) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s InputDataConfig) GoString() string { +func (s EventsDetectionJobFilter) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *InputDataConfig) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "InputDataConfig"} - if s.S3Uri == nil { - invalidParams.Add(request.NewErrParamRequired("S3Uri")) +func (s *EventsDetectionJobFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EventsDetectionJobFilter"} + if s.JobName != nil && len(*s.JobName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) } if invalidParams.Len() > 0 { @@ -10739,98 +11207,289 @@ func (s *InputDataConfig) Validate() error { return nil } -// SetInputFormat sets the InputFormat field's value. -func (s *InputDataConfig) SetInputFormat(v string) *InputDataConfig { - s.InputFormat = &v +// SetJobName sets the JobName field's value. +func (s *EventsDetectionJobFilter) SetJobName(v string) *EventsDetectionJobFilter { + s.JobName = &v return s } -// SetS3Uri sets the S3Uri field's value. -func (s *InputDataConfig) SetS3Uri(v string) *InputDataConfig { - s.S3Uri = &v +// SetJobStatus sets the JobStatus field's value. +func (s *EventsDetectionJobFilter) SetJobStatus(v string) *EventsDetectionJobFilter { + s.JobStatus = &v return s } -// An internal server error occurred. Retry your request. -type InternalServerException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"Message" min:"1" type:"string"` +// SetSubmitTimeAfter sets the SubmitTimeAfter field's value. +func (s *EventsDetectionJobFilter) SetSubmitTimeAfter(v time.Time) *EventsDetectionJobFilter { + s.SubmitTimeAfter = &v + return s } -// String returns the string representation -func (s InternalServerException) String() string { - return awsutil.Prettify(s) +// SetSubmitTimeBefore sets the SubmitTimeBefore field's value. 
+func (s *EventsDetectionJobFilter) SetSubmitTimeBefore(v time.Time) *EventsDetectionJobFilter { + s.SubmitTimeBefore = &v + return s } -// GoString returns the string representation -func (s InternalServerException) GoString() string { - return s.String() -} +// Provides information about an events detection job. +type EventsDetectionJobProperties struct { + _ struct{} `type:"structure"` -func newErrorInternalServerException(v protocol.ResponseMetadata) error { - return &InternalServerException{ - RespMetadata: v, - } -} + // The Amazon Resource Name (ARN) of the AWS Identify and Access Management + // (IAM) role that grants Amazon Comprehend read access to your input data. + DataAccessRoleArn *string `min:"20" type:"string"` -// Code returns the exception type name. -func (s *InternalServerException) Code() string { - return "InternalServerException" -} + // The time that the events detection job completed. + EndTime *time.Time `type:"timestamp"` -// Message returns the exception's message. -func (s *InternalServerException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} + // The input data configuration that you supplied when you created the events + // detection job. + InputDataConfig *InputDataConfig `type:"structure"` -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InternalServerException) OrigErr() error { - return nil -} + // The identifier assigned to the events detection job. + JobId *string `min:"1" type:"string"` -func (s *InternalServerException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} + // The name you assigned the events detection job. + JobName *string `min:"1" type:"string"` -// Status code returns the HTTP status code for the request's response error. -func (s *InternalServerException) StatusCode() int { - return s.RespMetadata.StatusCode -} + // The current status of the events detection job. 
+ JobStatus *string `type:"string" enum:"JobStatus"` -// RequestID returns the service's response RequestID for request. -func (s *InternalServerException) RequestID() string { - return s.RespMetadata.RequestID -} + // The language code of the input documents. + LanguageCode *string `type:"string" enum:"LanguageCode"` -// The filter specified for the operation is invalid. Specify a different filter. -type InvalidFilterException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + // A description of the status of the events detection job. + Message *string `type:"string"` - Message_ *string `locationName:"Message" min:"1" type:"string"` + // The output data configuration that you supplied when you created the events + // detection job. + OutputDataConfig *OutputDataConfig `type:"structure"` + + // The time that the events detection job was submitted for processing. + SubmitTime *time.Time `type:"timestamp"` + + // The types of events that are detected by the job. + TargetEventTypes []*string `min:"1" type:"list"` } // String returns the string representation -func (s InvalidFilterException) String() string { +func (s EventsDetectionJobProperties) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s InvalidFilterException) GoString() string { +func (s EventsDetectionJobProperties) GoString() string { return s.String() } -func newErrorInvalidFilterException(v protocol.ResponseMetadata) error { - return &InvalidFilterException{ - RespMetadata: v, - } -} - +// SetDataAccessRoleArn sets the DataAccessRoleArn field's value. +func (s *EventsDetectionJobProperties) SetDataAccessRoleArn(v string) *EventsDetectionJobProperties { + s.DataAccessRoleArn = &v + return s +} + +// SetEndTime sets the EndTime field's value. 
+func (s *EventsDetectionJobProperties) SetEndTime(v time.Time) *EventsDetectionJobProperties { + s.EndTime = &v + return s +} + +// SetInputDataConfig sets the InputDataConfig field's value. +func (s *EventsDetectionJobProperties) SetInputDataConfig(v *InputDataConfig) *EventsDetectionJobProperties { + s.InputDataConfig = v + return s +} + +// SetJobId sets the JobId field's value. +func (s *EventsDetectionJobProperties) SetJobId(v string) *EventsDetectionJobProperties { + s.JobId = &v + return s +} + +// SetJobName sets the JobName field's value. +func (s *EventsDetectionJobProperties) SetJobName(v string) *EventsDetectionJobProperties { + s.JobName = &v + return s +} + +// SetJobStatus sets the JobStatus field's value. +func (s *EventsDetectionJobProperties) SetJobStatus(v string) *EventsDetectionJobProperties { + s.JobStatus = &v + return s +} + +// SetLanguageCode sets the LanguageCode field's value. +func (s *EventsDetectionJobProperties) SetLanguageCode(v string) *EventsDetectionJobProperties { + s.LanguageCode = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *EventsDetectionJobProperties) SetMessage(v string) *EventsDetectionJobProperties { + s.Message = &v + return s +} + +// SetOutputDataConfig sets the OutputDataConfig field's value. +func (s *EventsDetectionJobProperties) SetOutputDataConfig(v *OutputDataConfig) *EventsDetectionJobProperties { + s.OutputDataConfig = v + return s +} + +// SetSubmitTime sets the SubmitTime field's value. +func (s *EventsDetectionJobProperties) SetSubmitTime(v time.Time) *EventsDetectionJobProperties { + s.SubmitTime = &v + return s +} + +// SetTargetEventTypes sets the TargetEventTypes field's value. +func (s *EventsDetectionJobProperties) SetTargetEventTypes(v []*string) *EventsDetectionJobProperties { + s.TargetEventTypes = v + return s +} + +// The input properties for a topic detection job. 
+type InputDataConfig struct { + _ struct{} `type:"structure"` + + // Specifies how the text in an input file should be processed: + // + // * ONE_DOC_PER_FILE - Each file is considered a separate document. Use + // this option when you are processing large documents, such as newspaper + // articles or scientific papers. + // + // * ONE_DOC_PER_LINE - Each line in a file is considered a separate document. + // Use this option when you are processing many short documents, such as + // text messages. + InputFormat *string `type:"string" enum:"InputFormat"` + + // The Amazon S3 URI for the input data. The URI must be in same region as the + // API endpoint that you are calling. The URI can point to a single input file + // or it can provide the prefix for a collection of data files. + // + // For example, if you use the URI S3://bucketName/prefix, if the prefix is + // a single file, Amazon Comprehend uses that file as input. If more than one + // file begins with the prefix, Amazon Comprehend uses all of them as input. + // + // S3Uri is a required field + S3Uri *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s InputDataConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InputDataConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InputDataConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InputDataConfig"} + if s.S3Uri == nil { + invalidParams.Add(request.NewErrParamRequired("S3Uri")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInputFormat sets the InputFormat field's value. +func (s *InputDataConfig) SetInputFormat(v string) *InputDataConfig { + s.InputFormat = &v + return s +} + +// SetS3Uri sets the S3Uri field's value. 
+func (s *InputDataConfig) SetS3Uri(v string) *InputDataConfig { + s.S3Uri = &v + return s +} + +// An internal server error occurred. Retry your request. +type InternalServerException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" min:"1" type:"string"` +} + +// String returns the string representation +func (s InternalServerException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InternalServerException) GoString() string { + return s.String() +} + +func newErrorInternalServerException(v protocol.ResponseMetadata) error { + return &InternalServerException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InternalServerException) Code() string { + return "InternalServerException" +} + +// Message returns the exception's message. +func (s *InternalServerException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InternalServerException) OrigErr() error { + return nil +} + +func (s *InternalServerException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InternalServerException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InternalServerException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The filter specified for the operation is invalid. Specify a different filter. 
+type InvalidFilterException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" min:"1" type:"string"` +} + +// String returns the string representation +func (s InvalidFilterException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidFilterException) GoString() string { + return s.String() +} + +func newErrorInvalidFilterException(v protocol.ResponseMetadata) error { + return &InvalidFilterException{ + RespMetadata: v, + } +} + // Code returns the exception type name. func (s *InvalidFilterException) Code() string { return "InvalidFilterException" @@ -11858,6 +12517,102 @@ func (s *ListEntityRecognizersOutput) SetNextToken(v string) *ListEntityRecogniz return s } +type ListEventsDetectionJobsInput struct { + _ struct{} `type:"structure"` + + // Filters the jobs that are returned. You can filter jobs on their name, status, + // or the date and time that they were submitted. You can only set one filter + // at a time. + Filter *EventsDetectionJobFilter `type:"structure"` + + // The maximum number of results to return in each page. + MaxResults *int64 `min:"1" type:"integer"` + + // Identifies the next page of results to return. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListEventsDetectionJobsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListEventsDetectionJobsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListEventsDetectionJobsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListEventsDetectionJobsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *ListEventsDetectionJobsInput) SetFilter(v *EventsDetectionJobFilter) *ListEventsDetectionJobsInput { + s.Filter = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListEventsDetectionJobsInput) SetMaxResults(v int64) *ListEventsDetectionJobsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListEventsDetectionJobsInput) SetNextToken(v string) *ListEventsDetectionJobsInput { + s.NextToken = &v + return s +} + +type ListEventsDetectionJobsOutput struct { + _ struct{} `type:"structure"` + + // A list containing the properties of each job that is returned. + EventsDetectionJobPropertiesList []*EventsDetectionJobProperties `type:"list"` + + // Identifies the next page of results to return. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListEventsDetectionJobsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListEventsDetectionJobsOutput) GoString() string { + return s.String() +} + +// SetEventsDetectionJobPropertiesList sets the EventsDetectionJobPropertiesList field's value. 
+func (s *ListEventsDetectionJobsOutput) SetEventsDetectionJobPropertiesList(v []*EventsDetectionJobProperties) *ListEventsDetectionJobsOutput { + s.EventsDetectionJobPropertiesList = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListEventsDetectionJobsOutput) SetNextToken(v string) *ListEventsDetectionJobsOutput { + s.NextToken = &v + return s +} + type ListKeyPhrasesDetectionJobsInput struct { _ struct{} `type:"structure"` @@ -13856,6 +14611,175 @@ func (s *StartEntitiesDetectionJobOutput) SetJobStatus(v string) *StartEntitiesD return s } +type StartEventsDetectionJobInput struct { + _ struct{} `type:"structure"` + + // An unique identifier for the request. If you don't set the client request + // token, Amazon Comprehend generates one. + ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` + + // The Amazon Resource Name (ARN) of the AWS Identity and Access Management + // (IAM) role that grants Amazon Comprehend read access to your input data. + // + // DataAccessRoleArn is a required field + DataAccessRoleArn *string `min:"20" type:"string" required:"true"` + + // Specifies the format and location of the input data for the job. + // + // InputDataConfig is a required field + InputDataConfig *InputDataConfig `type:"structure" required:"true"` + + // The identifier of the events detection job. + JobName *string `min:"1" type:"string"` + + // The language code of the input documents. + // + // LanguageCode is a required field + LanguageCode *string `type:"string" required:"true" enum:"LanguageCode"` + + // Specifies where to send the output files. + // + // OutputDataConfig is a required field + OutputDataConfig *OutputDataConfig `type:"structure" required:"true"` + + // The types of events to detect in the input documents. 
+ // + // TargetEventTypes is a required field + TargetEventTypes []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s StartEventsDetectionJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartEventsDetectionJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartEventsDetectionJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartEventsDetectionJobInput"} + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) + } + if s.DataAccessRoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("DataAccessRoleArn")) + } + if s.DataAccessRoleArn != nil && len(*s.DataAccessRoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("DataAccessRoleArn", 20)) + } + if s.InputDataConfig == nil { + invalidParams.Add(request.NewErrParamRequired("InputDataConfig")) + } + if s.JobName != nil && len(*s.JobName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) + } + if s.LanguageCode == nil { + invalidParams.Add(request.NewErrParamRequired("LanguageCode")) + } + if s.OutputDataConfig == nil { + invalidParams.Add(request.NewErrParamRequired("OutputDataConfig")) + } + if s.TargetEventTypes == nil { + invalidParams.Add(request.NewErrParamRequired("TargetEventTypes")) + } + if s.TargetEventTypes != nil && len(s.TargetEventTypes) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TargetEventTypes", 1)) + } + if s.InputDataConfig != nil { + if err := s.InputDataConfig.Validate(); err != nil { + invalidParams.AddNested("InputDataConfig", err.(request.ErrInvalidParams)) + } + } + if s.OutputDataConfig != nil { + if err := s.OutputDataConfig.Validate(); err != nil { + invalidParams.AddNested("OutputDataConfig", 
err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *StartEventsDetectionJobInput) SetClientRequestToken(v string) *StartEventsDetectionJobInput { + s.ClientRequestToken = &v + return s +} + +// SetDataAccessRoleArn sets the DataAccessRoleArn field's value. +func (s *StartEventsDetectionJobInput) SetDataAccessRoleArn(v string) *StartEventsDetectionJobInput { + s.DataAccessRoleArn = &v + return s +} + +// SetInputDataConfig sets the InputDataConfig field's value. +func (s *StartEventsDetectionJobInput) SetInputDataConfig(v *InputDataConfig) *StartEventsDetectionJobInput { + s.InputDataConfig = v + return s +} + +// SetJobName sets the JobName field's value. +func (s *StartEventsDetectionJobInput) SetJobName(v string) *StartEventsDetectionJobInput { + s.JobName = &v + return s +} + +// SetLanguageCode sets the LanguageCode field's value. +func (s *StartEventsDetectionJobInput) SetLanguageCode(v string) *StartEventsDetectionJobInput { + s.LanguageCode = &v + return s +} + +// SetOutputDataConfig sets the OutputDataConfig field's value. +func (s *StartEventsDetectionJobInput) SetOutputDataConfig(v *OutputDataConfig) *StartEventsDetectionJobInput { + s.OutputDataConfig = v + return s +} + +// SetTargetEventTypes sets the TargetEventTypes field's value. +func (s *StartEventsDetectionJobInput) SetTargetEventTypes(v []*string) *StartEventsDetectionJobInput { + s.TargetEventTypes = v + return s +} + +type StartEventsDetectionJobOutput struct { + _ struct{} `type:"structure"` + + // An unique identifier for the request. If you don't set the client request + // token, Amazon Comprehend generates one. + JobId *string `min:"1" type:"string"` + + // The status of the events detection job. 
+ JobStatus *string `type:"string" enum:"JobStatus"` +} + +// String returns the string representation +func (s StartEventsDetectionJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartEventsDetectionJobOutput) GoString() string { + return s.String() +} + +// SetJobId sets the JobId field's value. +func (s *StartEventsDetectionJobOutput) SetJobId(v string) *StartEventsDetectionJobOutput { + s.JobId = &v + return s +} + +// SetJobStatus sets the JobStatus field's value. +func (s *StartEventsDetectionJobOutput) SetJobStatus(v string) *StartEventsDetectionJobOutput { + s.JobStatus = &v + return s +} + type StartKeyPhrasesDetectionJobInput struct { _ struct{} `type:"structure"` @@ -14774,6 +15698,79 @@ func (s *StopEntitiesDetectionJobOutput) SetJobStatus(v string) *StopEntitiesDet return s } +type StopEventsDetectionJobInput struct { + _ struct{} `type:"structure"` + + // The identifier of the events detection job to stop. + // + // JobId is a required field + JobId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s StopEventsDetectionJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopEventsDetectionJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StopEventsDetectionJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopEventsDetectionJobInput"} + if s.JobId == nil { + invalidParams.Add(request.NewErrParamRequired("JobId")) + } + if s.JobId != nil && len(*s.JobId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetJobId sets the JobId field's value. 
+func (s *StopEventsDetectionJobInput) SetJobId(v string) *StopEventsDetectionJobInput { + s.JobId = &v + return s +} + +type StopEventsDetectionJobOutput struct { + _ struct{} `type:"structure"` + + // The identifier of the events detection job to stop. + JobId *string `min:"1" type:"string"` + + // The status of the events detection job. + JobStatus *string `type:"string" enum:"JobStatus"` +} + +// String returns the string representation +func (s StopEventsDetectionJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopEventsDetectionJobOutput) GoString() string { + return s.String() +} + +// SetJobId sets the JobId field's value. +func (s *StopEventsDetectionJobOutput) SetJobId(v string) *StopEventsDetectionJobOutput { + s.JobId = &v + return s +} + +// SetJobStatus sets the JobStatus field's value. +func (s *StopEventsDetectionJobOutput) SetJobStatus(v string) *StopEventsDetectionJobOutput { + s.JobStatus = &v + return s +} + type StopKeyPhrasesDetectionJobInput struct { _ struct{} `type:"structure"` @@ -15939,7 +16936,7 @@ func (s UpdateEndpointOutput) GoString() string { } // Configuration parameters for an optional private Virtual Private Cloud (VPC) -// containing the resources you are using for the job. For For more information, +// containing the resources you are using for the job. For more information, // see Amazon VPC (https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html). 
type VpcConfig struct { _ struct{} `type:"structure"` diff --git a/service/comprehend/comprehendiface/interface.go b/service/comprehend/comprehendiface/interface.go index 50799bba75e..edc28d84377 100644 --- a/service/comprehend/comprehendiface/interface.go +++ b/service/comprehend/comprehendiface/interface.go @@ -132,6 +132,10 @@ type ComprehendAPI interface { DescribeEntityRecognizerWithContext(aws.Context, *comprehend.DescribeEntityRecognizerInput, ...request.Option) (*comprehend.DescribeEntityRecognizerOutput, error) DescribeEntityRecognizerRequest(*comprehend.DescribeEntityRecognizerInput) (*request.Request, *comprehend.DescribeEntityRecognizerOutput) + DescribeEventsDetectionJob(*comprehend.DescribeEventsDetectionJobInput) (*comprehend.DescribeEventsDetectionJobOutput, error) + DescribeEventsDetectionJobWithContext(aws.Context, *comprehend.DescribeEventsDetectionJobInput, ...request.Option) (*comprehend.DescribeEventsDetectionJobOutput, error) + DescribeEventsDetectionJobRequest(*comprehend.DescribeEventsDetectionJobInput) (*request.Request, *comprehend.DescribeEventsDetectionJobOutput) + DescribeKeyPhrasesDetectionJob(*comprehend.DescribeKeyPhrasesDetectionJobInput) (*comprehend.DescribeKeyPhrasesDetectionJobOutput, error) DescribeKeyPhrasesDetectionJobWithContext(aws.Context, *comprehend.DescribeKeyPhrasesDetectionJobInput, ...request.Option) (*comprehend.DescribeKeyPhrasesDetectionJobOutput, error) DescribeKeyPhrasesDetectionJobRequest(*comprehend.DescribeKeyPhrasesDetectionJobInput) (*request.Request, *comprehend.DescribeKeyPhrasesDetectionJobOutput) @@ -211,6 +215,13 @@ type ComprehendAPI interface { ListEntityRecognizersPages(*comprehend.ListEntityRecognizersInput, func(*comprehend.ListEntityRecognizersOutput, bool) bool) error ListEntityRecognizersPagesWithContext(aws.Context, *comprehend.ListEntityRecognizersInput, func(*comprehend.ListEntityRecognizersOutput, bool) bool, ...request.Option) error + 
ListEventsDetectionJobs(*comprehend.ListEventsDetectionJobsInput) (*comprehend.ListEventsDetectionJobsOutput, error) + ListEventsDetectionJobsWithContext(aws.Context, *comprehend.ListEventsDetectionJobsInput, ...request.Option) (*comprehend.ListEventsDetectionJobsOutput, error) + ListEventsDetectionJobsRequest(*comprehend.ListEventsDetectionJobsInput) (*request.Request, *comprehend.ListEventsDetectionJobsOutput) + + ListEventsDetectionJobsPages(*comprehend.ListEventsDetectionJobsInput, func(*comprehend.ListEventsDetectionJobsOutput, bool) bool) error + ListEventsDetectionJobsPagesWithContext(aws.Context, *comprehend.ListEventsDetectionJobsInput, func(*comprehend.ListEventsDetectionJobsOutput, bool) bool, ...request.Option) error + ListKeyPhrasesDetectionJobs(*comprehend.ListKeyPhrasesDetectionJobsInput) (*comprehend.ListKeyPhrasesDetectionJobsOutput, error) ListKeyPhrasesDetectionJobsWithContext(aws.Context, *comprehend.ListKeyPhrasesDetectionJobsInput, ...request.Option) (*comprehend.ListKeyPhrasesDetectionJobsOutput, error) ListKeyPhrasesDetectionJobsRequest(*comprehend.ListKeyPhrasesDetectionJobsInput) (*request.Request, *comprehend.ListKeyPhrasesDetectionJobsOutput) @@ -252,6 +263,10 @@ type ComprehendAPI interface { StartEntitiesDetectionJobWithContext(aws.Context, *comprehend.StartEntitiesDetectionJobInput, ...request.Option) (*comprehend.StartEntitiesDetectionJobOutput, error) StartEntitiesDetectionJobRequest(*comprehend.StartEntitiesDetectionJobInput) (*request.Request, *comprehend.StartEntitiesDetectionJobOutput) + StartEventsDetectionJob(*comprehend.StartEventsDetectionJobInput) (*comprehend.StartEventsDetectionJobOutput, error) + StartEventsDetectionJobWithContext(aws.Context, *comprehend.StartEventsDetectionJobInput, ...request.Option) (*comprehend.StartEventsDetectionJobOutput, error) + StartEventsDetectionJobRequest(*comprehend.StartEventsDetectionJobInput) (*request.Request, *comprehend.StartEventsDetectionJobOutput) + 
StartKeyPhrasesDetectionJob(*comprehend.StartKeyPhrasesDetectionJobInput) (*comprehend.StartKeyPhrasesDetectionJobOutput, error) StartKeyPhrasesDetectionJobWithContext(aws.Context, *comprehend.StartKeyPhrasesDetectionJobInput, ...request.Option) (*comprehend.StartKeyPhrasesDetectionJobOutput, error) StartKeyPhrasesDetectionJobRequest(*comprehend.StartKeyPhrasesDetectionJobInput) (*request.Request, *comprehend.StartKeyPhrasesDetectionJobOutput) @@ -276,6 +291,10 @@ type ComprehendAPI interface { StopEntitiesDetectionJobWithContext(aws.Context, *comprehend.StopEntitiesDetectionJobInput, ...request.Option) (*comprehend.StopEntitiesDetectionJobOutput, error) StopEntitiesDetectionJobRequest(*comprehend.StopEntitiesDetectionJobInput) (*request.Request, *comprehend.StopEntitiesDetectionJobOutput) + StopEventsDetectionJob(*comprehend.StopEventsDetectionJobInput) (*comprehend.StopEventsDetectionJobOutput, error) + StopEventsDetectionJobWithContext(aws.Context, *comprehend.StopEventsDetectionJobInput, ...request.Option) (*comprehend.StopEventsDetectionJobOutput, error) + StopEventsDetectionJobRequest(*comprehend.StopEventsDetectionJobInput) (*request.Request, *comprehend.StopEventsDetectionJobOutput) + StopKeyPhrasesDetectionJob(*comprehend.StopKeyPhrasesDetectionJobInput) (*comprehend.StopKeyPhrasesDetectionJobOutput, error) StopKeyPhrasesDetectionJobWithContext(aws.Context, *comprehend.StopKeyPhrasesDetectionJobInput, ...request.Option) (*comprehend.StopKeyPhrasesDetectionJobOutput, error) StopKeyPhrasesDetectionJobRequest(*comprehend.StopKeyPhrasesDetectionJobInput) (*request.Request, *comprehend.StopKeyPhrasesDetectionJobOutput) diff --git a/service/elasticbeanstalk/api.go b/service/elasticbeanstalk/api.go index a35896b3ed8..8cc9d83ef12 100644 --- a/service/elasticbeanstalk/api.go +++ b/service/elasticbeanstalk/api.go @@ -7564,7 +7564,7 @@ type DescribeEnvironmentManagedActionHistoryInput struct { EnvironmentName *string `min:"4" type:"string"` // The maximum number of 
items to return for a single request. - MaxItems *int64 `type:"integer"` + MaxItems *int64 `min:"1" type:"integer"` // The pagination token returned by a previous request. NextToken *string `type:"string"` @@ -7586,6 +7586,9 @@ func (s *DescribeEnvironmentManagedActionHistoryInput) Validate() error { if s.EnvironmentName != nil && len(*s.EnvironmentName) < 4 { invalidParams.Add(request.NewErrParamMinLen("EnvironmentName", 4)) } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } if invalidParams.Len() > 0 { return invalidParams diff --git a/service/fsx/api.go b/service/fsx/api.go index d147973274f..1a4a41c401d 100644 --- a/service/fsx/api.go +++ b/service/fsx/api.go @@ -269,7 +269,7 @@ func (c *FSx) CreateBackupRequest(input *CreateBackupInput) (req *request.Reques // For more information about backing up Amazon FSx for Lustre file systems, // see Working with FSx for Lustre backups (https://docs.aws.amazon.com/fsx/latest/LustreGuide/using-backups-fsx.html). // -// For more information about backing up Amazon FSx for Lustre file systems, +// For more information about backing up Amazon FSx for Windows file systems, // see Working with FSx for Windows backups (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/using-backups.html). // // If a backup with the specified client request token exists, and the parameters @@ -400,9 +400,9 @@ func (c *FSx) CreateDataRepositoryTaskRequest(input *CreateDataRepositoryTaskInp // and symbolic links (symlinks) from your FSx file system to its linked data // repository. A CreateDataRepositoryTask operation will fail if a data repository // is not linked to the FSx file system. To learn more about data repository -// tasks, see Using Data Repository Tasks (https://docs.aws.amazon.com/fsx/latest/LustreGuide/data-repository-tasks.html). 
-// To learn more about linking a data repository to your file system, see Setting -// the Export Prefix (https://docs.aws.amazon.com/fsx/latest/LustreGuide/export-data-repository.html#export-prefix). +// tasks, see Data Repository Tasks (https://docs.aws.amazon.com/fsx/latest/LustreGuide/data-repository-tasks.html). +// To learn more about linking a data repository to your file system, see Linking +// your file system to an S3 bucket (https://docs.aws.amazon.com/fsx/latest/LustreGuide/create-fs-linked-data-repo.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2060,6 +2060,8 @@ func (c *FSx) UpdateFileSystemRequest(input *UpdateFileSystemInput) (req *reques // // * DailyAutomaticBackupStartTime // +// * StorageCapacity +// // * WeeklyMaintenanceStartTime // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2216,8 +2218,8 @@ func (s *ActiveDirectoryError) RequestID() string { return s.RespMetadata.RequestID } -// Describes a specific Amazon FSx Administrative Action for the current Windows -// file system. +// Describes a specific Amazon FSx administrative action for the current Windows +// or Lustre file system. type AdministrativeAction struct { _ struct{} `type:"structure"` @@ -2229,11 +2231,16 @@ type AdministrativeAction struct { // // * STORAGE_OPTIMIZATION - Once the FILE_SYSTEM_UPDATE task to increase // a file system's storage capacity completes successfully, a STORAGE_OPTIMIZATION - // task starts. Storage optimization is the process of migrating the file - // system data to the new, larger disks. You can track the storage migration - // progress using the ProgressPercent property. When STORAGE_OPTIMIZATION - // completes successfully, the parent FILE_SYSTEM_UPDATE action status changes - // to COMPLETED. 
For more information, see Managing Storage Capacity (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-storage-capacity.html). + // task starts. For Windows, storage optimization is the process of migrating + // the file system data to the new, larger disks. For Lustre, storage optimization + // consists of rebalancing the data across the existing and newly added file + // servers. You can track the storage optimization progress using the ProgressPercent + // property. When STORAGE_OPTIMIZATION completes successfully, the parent + // FILE_SYSTEM_UPDATE action status changes to COMPLETED. For more information, + // see Managing storage capacity (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-storage-capacity.html) + // in the Amazon FSx for Windows File Server User Guide and Managing storage + // and throughput capacity (https://docs.aws.amazon.com/fsx/latest/LustreGuide/managing-storage-capacity.html) + // in the Amazon FSx for Lustre User Guide. // // * FILE_SYSTEM_ALIAS_ASSOCIATION - A file system update to associate a // new DNS alias with the file system. For more information, see . @@ -2265,7 +2272,10 @@ type AdministrativeAction struct { // * UPDATED_OPTIMIZING - For a storage capacity increase update, Amazon // FSx has updated the file system with the new storage capacity, and is // now performing the storage optimization process. For more information, - // see Managing Storage Capacity (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-storage-capacity.html). + // see Managing storage capacity (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-storage-capacity.html) + // in the Amazon FSx for Windows File Server User Guide and Managing storage + // and throughput capacity (https://docs.aws.amazon.com/fsx/latest/LustreGuide/managing-storage-capacity.html) + // in the Amazon FSx for Lustre User Guide. 
Status *string `type:"string" enum:"Status"` // Describes the target value for the administration action, provided in the @@ -2523,7 +2533,11 @@ func (s *AssociateFileSystemAliasesOutput) SetAliases(v []*Alias) *AssociateFile return s } -// A backup of an Amazon FSx for file system. +// A backup of an Amazon FSx file system. For more information see: +// +// * Working with backups for Windows file systems (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/using-backups.html) +// +// * Working with backups for Lustre file systems (https://docs.aws.amazon.com/fsx/latest/LustreGuide/using-backups-fsx.html) type Backup struct { _ struct{} `type:"structure"` @@ -2558,12 +2572,15 @@ type Backup struct { // // * AVAILABLE - The backup is fully available. // - // * CREATING - FSx is creating the backup. + // * PENDING - For user-initiated backups on Lustre file systems only; Amazon + // FSx has not started creating the backup. + // + // * CREATING - Amazon FSx is creating the backup. // - // * TRANSFERRING - For Lustre file systems only; FSx is transferring the - // backup to S3. + // * TRANSFERRING - For user-initiated backups on Lustre file systems only; + // Amazon FSx is transferring the backup to S3. // - // * DELETED - The backup was deleted is no longer available. + // * DELETED - Amazon FSx deleted the backup and it is no longer available. // // * FAILED - Amazon FSx could not complete the backup. // @@ -5853,7 +5870,7 @@ type FileSystem struct { // The Amazon Resource Name (ARN) for the file system resource. ResourceARN *string `min:"8" type:"string"` - // The storage capacity of the file system in gigabytes (GB). + // The storage capacity of the file system in gibibytes (GiB). StorageCapacity *int64 `type:"integer"` // The storage type of the file system. Valid values are SSD and HDD. If set @@ -7612,14 +7629,33 @@ type UpdateFileSystemInput struct { // UpdateFileSystem operation. 
LustreConfiguration *UpdateFileSystemLustreConfiguration `type:"structure"` - // Use this parameter to increase the storage capacity of an Amazon FSx for - // Windows File Server file system. Specifies the storage capacity target value, - // GiB, for the file system you're updating. The storage capacity target value - // must be at least 10 percent (%) greater than the current storage capacity - // value. In order to increase storage capacity, the file system needs to have - // at least 16 MB/s of throughput capacity. You cannot make a storage capacity - // increase request if there is an existing storage capacity increase request - // in progress. For more information, see Managing Storage Capacity (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-storage-capacity.html). + // Use this parameter to increase the storage capacity of an Amazon FSx file + // system. Specifies the storage capacity target value, GiB, to increase the + // storage capacity for the file system that you're updating. You cannot make + // a storage capacity increase request if there is an existing storage capacity + // increase request in progress. + // + // For Windows file systems, the storage capacity target value must be at least + // 10 percent (%) greater than the current storage capacity value. In order + // to increase storage capacity, the file system must have at least 16 MB/s + // of throughput capacity. + // + // For Lustre file systems, the storage capacity target value can be the following: + // + // * For SCRATCH_2 and PERSISTENT_1 SSD deployment types, valid values are + // in multiples of 2400 GiB. The value must be greater than the current storage + // capacity. + // + // * For PERSISTENT HDD file systems, valid values are multiples of 6000 + // GiB for 12 MB/s/TiB file systems and multiples of 1800 GiB for 40 MB/s/TiB + // file systems. The values must be greater than the current storage capacity. 
+ // + // * For SCRATCH_1 file systems, you cannot increase the storage capacity. + // + // For more information, see Managing storage capacity (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-storage-capacity.html) + // in the Amazon FSx for Windows File Server User Guide and Managing storage + // and throughput capacity (https://docs.aws.amazon.com/fsx/latest/LustreGuide/managing-storage-capacity.html) + // in the Amazon FSx for Lustre User Guide. StorageCapacity *int64 `type:"integer"` // The configuration updates for an Amazon FSx for Windows File Server file @@ -8138,11 +8174,16 @@ func ActiveDirectoryErrorType_Values() []string { // // * STORAGE_OPTIMIZATION - Once the FILE_SYSTEM_UPDATE task to increase // a file system's storage capacity completes successfully, a STORAGE_OPTIMIZATION -// task starts. Storage optimization is the process of migrating the file -// system data to the new, larger disks. You can track the storage migration -// progress using the ProgressPercent property. When STORAGE_OPTIMIZATION -// completes successfully, the parent FILE_SYSTEM_UPDATE action status changes -// to COMPLETED. For more information, see Managing Storage Capacity (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-storage-capacity.html). +// task starts. For Windows, storage optimization is the process of migrating +// the file system data to the new, larger disks. For Lustre, storage optimization +// consists of rebalancing the data across the existing and newly added file +// servers. You can track the storage optimization progress using the ProgressPercent +// property. When STORAGE_OPTIMIZATION completes successfully, the parent +// FILE_SYSTEM_UPDATE action status changes to COMPLETED. 
For more information, +
// see Managing storage capacity (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-storage-capacity.html) +
// in the Amazon FSx for Windows File Server User Guide and Managing storage +
// and throughput capacity (https://docs.aws.amazon.com/fsx/latest/LustreGuide/managing-storage-capacity.html) +
// in the Amazon FSx for Lustre User Guide. // // * FILE_SYSTEM_ALIAS_ASSOCIATION - A file system update to associate a // new DNS alias with the file system. For more information, see . @@ -8225,12 +8266,15 @@ func AutoImportPolicyType_Values() []string { // // * AVAILABLE - The backup is fully available. // -// * CREATING - FSx is creating the new user-intiated backup +// * PENDING - For user-initiated backups on Lustre file systems only; Amazon +// FSx has not started creating the backup. +// +// * CREATING - Amazon FSx is creating the new user-initiated backup // // * TRANSFERRING - For user-initiated backups on Lustre file systems only; -// FSx is backing up the file system. +// Amazon FSx is backing up the file system. // -// * DELETED - The backup was deleted is no longer available. +// * DELETED - Amazon FSx deleted the backup and it is no longer available. // // * FAILED - Amazon FSx could not complete the backup. 
const ( @@ -8248,6 +8292,9 @@ const ( // BackupLifecycleFailed is a BackupLifecycle enum value BackupLifecycleFailed = "FAILED" + + // BackupLifecyclePending is a BackupLifecycle enum value + BackupLifecyclePending = "PENDING" ) // BackupLifecycle_Values returns all elements of the BackupLifecycle enum @@ -8258,6 +8305,7 @@ func BackupLifecycle_Values() []string { BackupLifecycleTransferring, BackupLifecycleDeleted, BackupLifecycleFailed, + BackupLifecyclePending, } } diff --git a/service/gamelift/api.go b/service/gamelift/api.go index d756494a899..978b9bda1f4 100644 --- a/service/gamelift/api.go +++ b/service/gamelift/api.go @@ -83,9 +83,9 @@ func (c *GameLift) AcceptMatchRequest(input *AcceptMatchInput) (req *request.Req // // Learn more // -// Add FlexMatch to a Game Client (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-client.html) +// Add FlexMatch to a Game Client (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-client.html) // -// FlexMatch Events Reference (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-events.html) +// FlexMatch Events Reference (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-events.html) // // Related operations // @@ -1234,30 +1234,35 @@ func (c *GameLift) CreateMatchmakingConfigurationRequest(input *CreateMatchmakin // CreateMatchmakingConfiguration API operation for Amazon GameLift. // -// Defines a new matchmaking configuration for use with FlexMatch. A matchmaking -// configuration sets out guidelines for matching players and getting the matches -// into games. You can set up multiple matchmaking configurations to handle -// the scenarios needed for your game. Each matchmaking ticket (StartMatchmaking -// or StartMatchBackfill) specifies a configuration for the match and provides -// player attributes to support the configuration being used. 
-// -// To create a matchmaking configuration, at a minimum you must specify the -// following: configuration name; a rule set that governs how to evaluate players -// and find acceptable matches; a game session queue to use when placing a new -// game session for the match; and the maximum time allowed for a matchmaking -// attempt. -// -// To track the progress of matchmaking tickets, set up an Amazon Simple Notification -// Service (SNS) to receive notifications, and provide the topic ARN in the -// matchmaking configuration. An alternative method, continuously poling ticket -// status with DescribeMatchmaking, should only be used for games in development -// with low matchmaking usage. +// Defines a new matchmaking configuration for use with FlexMatch. Whether your +// are using FlexMatch with GameLift hosting or as a standalone matchmaking +// service, the matchmaking configuration sets out rules for matching players +// and forming teams. If you're also using GameLift hosting, it defines how +// to start game sessions for each match. Your matchmaking system can use multiple +// configurations to handle different game scenarios. All matchmaking requests +// (StartMatchmaking or StartMatchBackfill) identify the matchmaking configuration +// to use and provide player attributes consistent with that configuration. +// +// To create a matchmaking configuration, you must provide the following: configuration +// name and FlexMatch mode (with or without GameLift hosting); a rule set that +// specifies how to evaluate players and find acceptable matches; whether player +// acceptance is required; and the maximum time allowed for a matchmaking attempt. +// When using FlexMatch with GameLift hosting, you also need to identify the +// game session queue to use when starting a game session for the match. 
+// +// In addition, you must set up an Amazon Simple Notification Service (SNS) +// to receive matchmaking notifications, and provide the topic ARN in the matchmaking +// configuration. An alternative method, continuously polling ticket status +// with DescribeMatchmaking, is only suitable for games in development with +// low matchmaking usage. // // Learn more // -// Design a FlexMatch Matchmaker (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-configuration.html) +// FlexMatch Developer Guide (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/gamelift-match.html) // -// Set Up FlexMatch Event Notification (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-notification.html) +// Design a FlexMatch Matchmaker (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-configuration.html) +// +// Set Up FlexMatch Event Notification (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-notification.html) // // Related operations // @@ -1391,11 +1396,11 @@ func (c *GameLift) CreateMatchmakingRuleSetRequest(input *CreateMatchmakingRuleS // // Learn more // -// * Build a Rule Set (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-rulesets.html) +// * Build a Rule Set (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-rulesets.html) // -// * Design a Matchmaker (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-configuration.html) +// * Design a Matchmaker (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-configuration.html) // -// * Matchmaking with FlexMatch (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-intro.html) +// * Matchmaking with FlexMatch (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-intro.html) // // Related operations // @@ -2900,7 +2905,7 @@ func (c *GameLift) DeleteMatchmakingRuleSetRequest(input *DeleteMatchmakingRuleS // // Learn more // -// * Build a Rule Set 
(https://docs.aws.amazon.com/gamelift/latest/developerguide/match-rulesets.html) +// * Build a Rule Set (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-rulesets.html) // // Related operations // @@ -6021,9 +6026,9 @@ func (c *GameLift) DescribeMatchmakingRequest(input *DescribeMatchmakingInput) ( // // Learn more // -// Add FlexMatch to a Game Client (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-client.html) +// Add FlexMatch to a Game Client (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-client.html) // -// Set Up FlexMatch Event Notification (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-notification.html) +// Set Up FlexMatch Event Notification (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-notification.html) // // Related operations // @@ -6143,7 +6148,7 @@ func (c *GameLift) DescribeMatchmakingConfigurationsRequest(input *DescribeMatch // // Learn more // -// Setting Up FlexMatch Matchmakers (https://docs.aws.amazon.com/gamelift/latest/developerguide/matchmaker-build.html) +// Setting Up FlexMatch Matchmakers (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/matchmaker-build.html) // // Related operations // @@ -6315,7 +6320,7 @@ func (c *GameLift) DescribeMatchmakingRuleSetsRequest(input *DescribeMatchmaking // // Learn more // -// * Build a Rule Set (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-rulesets.html) +// * Build a Rule Set (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-rulesets.html) // // Related operations // @@ -9803,13 +9808,13 @@ func (c *GameLift) StartMatchBackfillRequest(input *StartMatchBackfillInput) (re // game session's connection information, and the GameSession object is updated // to include matchmaker data on the new players. 
For more detail on how match // backfill requests are processed, see How Amazon GameLift FlexMatch Works -// (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-match.html). +// (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/gamelift-match.html). // // Learn more // -// Backfill Existing Games with FlexMatch (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-backfill.html) +// Backfill Existing Games with FlexMatch (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-backfill.html) // -// How GameLift FlexMatch Works (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-match.html) +// How GameLift FlexMatch Works (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/gamelift-match.html) // // Related operations // @@ -9914,61 +9919,35 @@ func (c *GameLift) StartMatchmakingRequest(input *StartMatchmakingInput) (req *r // StartMatchmaking API operation for Amazon GameLift. // // Uses FlexMatch to create a game match for a group of players based on custom -// matchmaking rules, and starts a new game for the matched players. Each matchmaking -// request specifies the type of match to build (team configuration, rules for -// an acceptable match, etc.). The request also specifies the players to find -// a match for and where to host the new game session for optimal performance. -// A matchmaking request might start with a single player or a group of players -// who want to play together. FlexMatch finds additional players as needed to -// fill the match. Match type, rules, and the queue used to place a new game -// session are defined in a MatchmakingConfiguration. +// matchmaking rules. If you're also using GameLift hosting, a new game session +// is started for the matched players. Each matchmaking request identifies one +// or more players to find a match for, and specifies the type of match to build, +// including the team configuration and the rules for an acceptable match. 
When +// a matchmaking request identifies a group of players who want to play together, +// FlexMatch finds additional players to fill the match. Match type, rules, +// and other features are defined in a MatchmakingConfiguration. // // To start matchmaking, provide a unique ticket ID, specify a matchmaking configuration, -// and include the players to be matched. You must also include a set of player -// attributes relevant for the matchmaking configuration. If successful, a matchmaking -// ticket is returned with status set to QUEUED. -// -// Track the status of the ticket to respond as needed and acquire game session -// connection information for successfully completed matches. Ticket status -// updates are tracked using event notification through Amazon Simple Notification -// Service (SNS), which is defined in the matchmaking configuration. -// -// Processing a matchmaking request -- FlexMatch handles a matchmaking request -// as follows: -// -// Your client code submits a StartMatchmaking request for one or more players -// and tracks the status of the request ticket. -// -// FlexMatch uses this ticket and others in process to build an acceptable match. -// When a potential match is identified, all tickets in the proposed match are -// advanced to the next status. -// -// If the match requires player acceptance (set in the matchmaking configuration), -// the tickets move into status REQUIRES_ACCEPTANCE. This status triggers your -// client code to solicit acceptance from all players in every ticket involved -// in the match, and then call AcceptMatch for each player. If any player rejects -// or fails to accept the match before a specified timeout, the proposed match -// is dropped (see AcceptMatch for more details). -// -// Once a match is proposed and accepted, the matchmaking tickets move into -// status PLACING. 
FlexMatch locates resources for a new game session using -// the game session queue (set in the matchmaking configuration) and creates -// the game session based on the match data. -// -// When the match is successfully placed, the matchmaking tickets move into -// COMPLETED status. Connection information (including game session endpoint -// and player session) is added to the matchmaking tickets. Matched players -// can use the connection information to join the game. +// and include the players to be matched. For each player, you must also include +// the player attribute values that are required by the matchmaking configuration +// (in the rule set). If successful, a matchmaking ticket is returned with status +// set to QUEUED. +// +// Track the status of the ticket to respond as needed. If you're also using +// GameLift hosting, a successfully completed ticket contains game session connection +// information. Ticket status updates are tracked using event notification through +// Amazon Simple Notification Service (SNS), which is defined in the matchmaking +// configuration. 
// // Learn more // -// Add FlexMatch to a Game Client (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-client.html) +// Add FlexMatch to a Game Client (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-client.html) // -// Set Up FlexMatch Event Notification (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-notification.html) +// Set Up FlexMatch Event Notification (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-notification.html) // -// FlexMatch Integration Roadmap (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-tasks.html) +// FlexMatch Integration Roadmap (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-tasks.html) // -// How GameLift FlexMatch Works (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-match.html) +// How GameLift FlexMatch Works (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/gamelift-match.html) // // Related operations // @@ -10315,7 +10294,7 @@ func (c *GameLift) StopMatchmakingRequest(input *StopMatchmakingInput) (req *req // // Learn more // -// Add FlexMatch to a Game Client (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-client.html) +// Add FlexMatch to a Game Client (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-client.html) // // Related operations // @@ -11930,7 +11909,7 @@ func (c *GameLift) UpdateMatchmakingConfigurationRequest(input *UpdateMatchmakin // // Learn more // -// Design a FlexMatch Matchmaker (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-configuration.html) +// Design a FlexMatch Matchmaker (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-configuration.html) // // Related operations // @@ -12297,7 +12276,7 @@ func (c *GameLift) ValidateMatchmakingRuleSetRequest(input *ValidateMatchmakingR // // Learn more // -// * Build a Rule Set (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-rulesets.html) +// * 
Build a Rule Set (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-rulesets.html)
//
// Related operations
//
@@ -13106,12 +13085,14 @@ type CreateBuildInput struct {
	// cannot be changed later.
	OperatingSystem *string `type:"string" enum:"OperatingSystem"`

-	// Information indicating where your game build files are stored. Use this parameter
-	// only when creating a build with files stored in an S3 bucket that you own.
-	// The storage location must specify an S3 bucket name and key. The location
-	// must also specify a role ARN that you set up to allow Amazon GameLift to
-	// access your S3 bucket. The S3 bucket and your new build must be in the same
-	// Region.
+	// The location where your game build files are stored. Use this parameter only
+	// when creating a build using files that are stored in an S3 bucket that you
+	// own. Identify an S3 bucket name and key, which must be in the same Region where
+	// you're creating a build. This parameter must also specify the ARN for an
+	// IAM role that you've set up to give Amazon GameLift access to your S3 bucket.
+	// To call this operation with a storage location, you must have IAM PassRole
+	// permission. For more details on IAM roles and PassRole permissions, see Set
+	// up a role for GameLift access (https://docs.aws.amazon.com/gamelift/latest/developerguide/setting-up-role.html).
	StorageLocation *S3Location `type:"structure"`

	// A list of labels to assign to the new build resource. Tags are developer-defined
@@ -13302,12 +13283,11 @@ type CreateFleetInput struct {
	FleetType *string `type:"string" enum:"FleetType"`

	// A unique identifier for an AWS IAM role that manages access to your AWS services.
-	// With an instance role ARN set, any application that runs on an instance in
-	// this fleet can assume the role, including install scripts, server processes,
-	// and daemons (background processes).
Create a role or look up a role's ARN - // from the IAM dashboard (https://console.aws.amazon.com/iam/) in the AWS Management - // Console. Learn more about using on-box credentials for your game servers - // at Access external resources from a game server (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html). + // Fleets with an instance role ARN allow applications that are running on the + // fleet's instances to assume the role. Learn more about using on-box credentials + // for your game servers at Access external resources from a game server (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html). + // To call this operation with instance role ARN, you must have IAM PassRole + // permissions. See IAM policy examples for GameLift (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-iam-policy-examples.html). InstanceRoleArn *string `min:"1" type:"string"` // This parameter is no longer used. Instead, to specify where Amazon GameLift @@ -13728,7 +13708,7 @@ type CreateGameServerGroupInput struct { // up. This property cannot be updated after the game server group is created, // and the corresponding Auto Scaling group will always use the property value // that is set with this request, even if the Auto Scaling group is updated - // directly + // directly. VpcSubnets []*string `min:"1" type:"list"` } @@ -14239,19 +14219,23 @@ type CreateMatchmakingConfigurationInput struct { // A flag that determines whether a match that was created with this configuration // must be accepted by the matched players. To require acceptance, set to TRUE. + // With this option enabled, matchmaking tickets use the status REQUIRES_ACCEPTANCE + // to indicate when a completed potential match is waiting for player acceptance. 
// // AcceptanceRequired is a required field AcceptanceRequired *bool `type:"boolean" required:"true"` // The length of time (in seconds) to wait for players to accept a proposed - // match. If any player rejects the match or fails to accept before the timeout, - // the ticket continues to look for an acceptable match. + // match, if acceptance is required. If any player rejects the match or fails + // to accept before the timeout, the tickets are returned to the ticket pool + // and continue to be evaluated for an acceptable match. AcceptanceTimeoutSeconds *int64 `min:"1" type:"integer"` // The number of player slots in a match to keep open for future players. For // example, assume that the configuration's rule set specifies a match for a // single 12-person team. If the additional player count is set to 2, only 10 - // players are initially selected for the match. + // players are initially selected for the match. This parameter is not used + // if FlexMatchMode is set to STANDALONE. AdditionalPlayerCount *int64 `type:"integer"` // The method used to backfill game sessions that are created with this matchmaking @@ -14259,7 +14243,8 @@ type CreateMatchmakingConfigurationInput struct { // or does not use the match backfill feature. Specify AUTOMATIC to have GameLift // create a StartMatchBackfill request whenever a game session has one or more // open slots. Learn more about manual and automatic backfill in Backfill Existing - // Games with FlexMatch (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-backfill.html). + // Games with FlexMatch (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-backfill.html). + // Automatic backfill is not available when FlexMatchMode is set to STANDALONE. BackfillMode *string `type:"string" enum:"BackfillMode"` // Information to be added to all events related to this matchmaking configuration. 
@@ -14268,28 +14253,40 @@ type CreateMatchmakingConfigurationInput struct { // A human-readable description of the matchmaking configuration. Description *string `min:"1" type:"string"` + // Indicates whether this matchmaking configuration is being used with GameLift + // hosting or as a standalone matchmaking solution. + // + // * STANDALONE - FlexMatch forms matches and returns match information, + // including players and team assignments, in a MatchmakingSucceeded (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-events.html#match-events-matchmakingsucceeded) + // event. + // + // * WITH_QUEUE - FlexMatch forms matches and uses the specified GameLift + // queue to start a game session for the match. + FlexMatchMode *string `type:"string" enum:"FlexMatchMode"` + // A set of custom properties for a game session, formatted as key-value pairs. // These properties are passed to a game server process in the GameSession object // with a request to start a new game session (see Start a Game Session (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)). // This information is added to the new GameSession object that is created for - // a successful match. + // a successful match. This parameter is not used if FlexMatchMode is set to + // STANDALONE. GameProperties []*GameProperty `type:"list"` // A set of custom game session properties, formatted as a single string value. // This data is passed to a game server process in the GameSession object with // a request to start a new game session (see Start a Game Session (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)). // This information is added to the new GameSession object that is created for - // a successful match. + // a successful match. This parameter is not used if FlexMatchMode is set to + // STANDALONE. 
GameSessionData *string `min:"1" type:"string"` // Amazon Resource Name (ARN (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)) // that is assigned to a GameLift game session queue resource and uniquely identifies - // it. ARNs are unique across all Regions. These queues are used when placing - // game sessions for matches that are created with this matchmaking configuration. - // Queues can be located in any Region. - // - // GameSessionQueueArns is a required field - GameSessionQueueArns []*string `type:"list" required:"true"` + // it. ARNs are unique across all Regions. Queues can be located in any Region. + // Queues are used to start new GameLift-hosted game sessions for matches that + // are created with this matchmaking configuration. If FlexMatchMode is set + // to STANDALONE, do not set this parameter. + GameSessionQueueArns []*string `type:"list"` // A unique identifier for a matchmaking configuration. This name is used to // identify the configuration associated with a matchmaking request or ticket. @@ -14350,9 +14347,6 @@ func (s *CreateMatchmakingConfigurationInput) Validate() error { if s.GameSessionData != nil && len(*s.GameSessionData) < 1 { invalidParams.Add(request.NewErrParamMinLen("GameSessionData", 1)) } - if s.GameSessionQueueArns == nil { - invalidParams.Add(request.NewErrParamRequired("GameSessionQueueArns")) - } if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } @@ -14431,6 +14425,12 @@ func (s *CreateMatchmakingConfigurationInput) SetDescription(v string) *CreateMa return s } +// SetFlexMatchMode sets the FlexMatchMode field's value. +func (s *CreateMatchmakingConfigurationInput) SetFlexMatchMode(v string) *CreateMatchmakingConfigurationInput { + s.FlexMatchMode = &v + return s +} + // SetGameProperties sets the GameProperties field's value. 
func (s *CreateMatchmakingConfigurationInput) SetGameProperties(v []*GameProperty) *CreateMatchmakingConfigurationInput { s.GameProperties = v @@ -14812,14 +14812,15 @@ type CreateScriptInput struct { // need to be unique. You can use UpdateScript to change this value later. Name *string `min:"1" type:"string"` - // The location of the Amazon S3 bucket where a zipped file containing your - // Realtime scripts is stored. The storage location must specify the Amazon - // S3 bucket name, the zip file name (the "key"), and a role ARN that allows - // Amazon GameLift to access the Amazon S3 storage location. The S3 bucket must - // be in the same Region where you want to create a new script. By default, - // Amazon GameLift uploads the latest version of the zip file; if you have S3 - // object versioning turned on, you can use the ObjectVersion parameter to specify - // an earlier version. + // The Amazon S3 location of your Realtime scripts. The storage location must + // specify the S3 bucket name, the zip file name (the "key"), and an IAM role + // ARN that allows Amazon GameLift to access the S3 storage location. The S3 + // bucket must be in the same Region where you are creating a new script. By + // default, Amazon GameLift uploads the latest version of the zip file; if you + // have S3 object versioning turned on, you can use the ObjectVersion parameter + // to specify an earlier version. To call this operation with a storage location, + // you must have IAM PassRole permission. For more details on IAM roles and + // PassRole permissions, see Set up a role for GameLift access (https://docs.aws.amazon.com/gamelift/latest/developerguide/setting-up-role.html). StorageLocation *S3Location `type:"structure"` // A list of labels to assign to the new script resource. Tags are developer-defined @@ -15297,8 +15298,8 @@ type DeleteGameServerGroupInput struct { // The type of delete to perform. 
Options include the following: // - // * SAFE_DELETE – Terminates the game server group and EC2 Auto Scaling - // group only when it has no game servers that are in UTILIZED status. + // * SAFE_DELETE – (default) Terminates the game server group and EC2 Auto + // Scaling group only when it has no game servers that are in UTILIZED status. // // * FORCE_DELETE – Terminates the game server group, including all active // game servers regardless of their utilization status, and the EC2 Auto @@ -18576,12 +18577,6 @@ type FleetAttributes struct { FleetType *string `type:"string" enum:"FleetType"` // A unique identifier for an AWS IAM role that manages access to your AWS services. - // With an instance role ARN set, any application that runs on an instance in - // this fleet can assume the role, including install scripts, server processes, - // and daemons (background processes). Create a role or look up a role's ARN - // from the IAM dashboard (https://console.aws.amazon.com/iam/) in the AWS Management - // Console. Learn more about using on-box credentials for your game servers - // at Access external resources from a game server (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html). InstanceRoleArn *string `min:"1" type:"string"` // EC2 instance type indicating the computing resources of each instance in @@ -19683,7 +19678,7 @@ type GameSession struct { // session. It is in JSON syntax, formatted as a string. In addition the matchmaking // configuration used, it contains data on all players assigned to the match, // including player attributes and team assignments. For more details on matchmaker - // data, see Match Data (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-server.html#match-server-data). + // data, see Match Data (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-server.html#match-server-data). 
// Matchmaker data is useful when requesting match backfills, and is updated // whenever new players are added during a successful backfill (see StartMatchBackfill). MatchmakerData *string `min:"1" type:"string"` @@ -19835,12 +19830,12 @@ func (s *GameSession) SetTerminationTime(v time.Time) *GameSession { return s } -// Connection information for the new game session that is created with matchmaking. -// (with StartMatchmaking). Once a match is set, the FlexMatch engine places -// the match and creates a new game session for it. This information, including -// the game session endpoint and player sessions for each player in the original -// matchmaking request, is added to the MatchmakingTicket, which can be retrieved -// by calling DescribeMatchmaking. +// Connection information for a new game session that is created in response +// to a StartMatchmaking request. Once a match is made, the FlexMatch engine +// creates a new game session for it. This information, including the game session +// endpoint and player sessions for each player in the original matchmaking +// request, is added to the MatchmakingTicket, which can be retrieved by calling +// DescribeMatchmaking. type GameSessionConnectionInfo struct { _ struct{} `type:"structure"` @@ -20083,7 +20078,7 @@ type GameSessionPlacement struct { // formatted as a string. It identifies the matchmaking configuration used to // create the match, and contains data on all players assigned to the match, // including player attributes and team assignments. For more details on matchmaker - // data, see Match Data (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-server.html#match-server-data). + // data, see Match Data (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-server.html#match-server-data). 
MatchmakerData *string `min:"1" type:"string"` // The maximum number of players that can be connected simultaneously to the @@ -22068,24 +22063,29 @@ type MatchmakingConfiguration struct { // A flag that indicates whether a match that was created with this configuration // must be accepted by the matched players. To require acceptance, set to TRUE. + // When this option is enabled, matchmaking tickets use the status REQUIRES_ACCEPTANCE + // to indicate when a completed potential match is waiting for player acceptance. AcceptanceRequired *bool `type:"boolean"` // The length of time (in seconds) to wait for players to accept a proposed - // match. If any player rejects the match or fails to accept before the timeout, - // the ticket continues to look for an acceptable match. + // match, if acceptance is required. If any player rejects the match or fails + // to accept before the timeout, the tickets are returned to the ticket pool + // and continue to be evaluated for an acceptable match. AcceptanceTimeoutSeconds *int64 `min:"1" type:"integer"` // The number of player slots in a match to keep open for future players. For // example, assume that the configuration's rule set specifies a match for a // single 12-person team. If the additional player count is set to 2, only 10 - // players are initially selected for the match. + // players are initially selected for the match. This parameter is not used + // when FlexMatchMode is set to STANDALONE. AdditionalPlayerCount *int64 `type:"integer"` // The method used to backfill game sessions created with this matchmaking configuration. // MANUAL indicates that the game makes backfill requests or does not use the // match backfill feature. AUTOMATIC indicates that GameLift creates StartMatchBackfill // requests whenever a game session has one or more open slots. 
Learn more about - // manual and automatic backfill in Backfill Existing Games with FlexMatch (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-backfill.html). + // manual and automatic backfill in Backfill Existing Games with FlexMatch (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-backfill.html). + // Automatic backfill is not available when FlexMatchMode is set to STANDALONE. BackfillMode *string `type:"string" enum:"BackfillMode"` // Amazon Resource Name (ARN (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)) @@ -22104,25 +22104,39 @@ type MatchmakingConfiguration struct { // A descriptive label that is associated with matchmaking configuration. Description *string `min:"1" type:"string"` + // Indicates whether this matchmaking configuration is being used with GameLift + // hosting or as a standalone matchmaking solution. + // + // * STANDALONE - FlexMatch forms matches and returns match information, + // including players and team assignments, in a MatchmakingSucceeded (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-events.html#match-events-matchmakingsucceeded) + // event. + // + // * WITH_QUEUE - FlexMatch forms matches and uses the specified GameLift + // queue to start a game session for the match. + FlexMatchMode *string `type:"string" enum:"FlexMatchMode"` + // A set of custom properties for a game session, formatted as key-value pairs. // These properties are passed to a game server process in the GameSession object // with a request to start a new game session (see Start a Game Session (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)). // This information is added to the new GameSession object that is created for - // a successful match. + // a successful match. This parameter is not used when FlexMatchMode is set + // to STANDALONE. 
GameProperties []*GameProperty `type:"list"`

	// A set of custom game session properties, formatted as a single string value.
	// This data is passed to a game server process in the GameSession object with
	// a request to start a new game session (see Start a Game Session (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)).
	// This information is added to the new GameSession object that is created for
-	// a successful match.
+	// a successful match. This parameter is not used when FlexMatchMode is set
+	// to STANDALONE.
	GameSessionData *string `min:"1" type:"string"`

	// Amazon Resource Name (ARN (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html))
	// that is assigned to a GameLift game session queue resource and uniquely identifies
-	// it. ARNs are unique across all Regions. GameLift uses the listed queues when
-	// placing game sessions for matches that are created with this matchmaking
-	// configuration. Queues can be located in any Region.
+	// it. ARNs are unique across all Regions. Queues can be located in any Region.
+	// Queues are used to start new GameLift-hosted game sessions for matches that
+	// are created with this matchmaking configuration. This property is not set
+	// when FlexMatchMode is set to STANDALONE.
	GameSessionQueueArns []*string `type:"list"`

	// A unique identifier for a matchmaking configuration. This name is used to
@@ -22206,6 +22220,12 @@ func (s *MatchmakingConfiguration) SetDescription(v string) *MatchmakingConfigur
	return s
}

+// SetFlexMatchMode sets the FlexMatchMode field's value.
+func (s *MatchmakingConfiguration) SetFlexMatchMode(v string) *MatchmakingConfiguration {
+	s.FlexMatchMode = &v
+	return s
+}
+
// SetGameProperties sets the GameProperties field's value.
func (s *MatchmakingConfiguration) SetGameProperties(v []*GameProperty) *MatchmakingConfiguration { s.GameProperties = v @@ -22261,7 +22281,7 @@ func (s *MatchmakingConfiguration) SetRuleSetName(v string) *MatchmakingConfigur // // A rule set may define the following elements for a match. For detailed information // and examples showing how to construct a rule set, see Build a FlexMatch Rule -// Set (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-rulesets.html). +// Set (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-rulesets.html). // // * Teams -- Required. A rule set must define one or multiple teams for // the match and set minimum and maximum team sizes. For example, a rule @@ -22372,7 +22392,8 @@ type MatchmakingTicket struct { // Identifier and connection information of the game session created for the // match. This information is added to the ticket only after the matchmaking - // request has been successfully completed. + // request has been successfully completed. This parameter is not set when FlexMatch + // is being used without GameLift hosting. GameSessionConnectionInfo *GameSessionConnectionInfo `type:"structure"` // A set of Player objects, each representing a player to find matches for. @@ -24758,9 +24779,7 @@ type StartMatchBackfillInput struct { // Amazon Resource Name (ARN (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)) // that is assigned to a game session and uniquely identifies it. This is the // same as the game session ID. - // - // GameSessionArn is a required field - GameSessionArn *string `min:"1" type:"string" required:"true"` + GameSessionArn *string `min:"1" type:"string"` // Match information on all players that are currently assigned to the game // session. 
This information is used by the matchmaker to find new players and @@ -24769,7 +24788,7 @@ type StartMatchBackfillInput struct { // * PlayerID, PlayerAttributes, Team -\\- This information is maintained // in the GameSession object, MatchmakerData property, for all players who // are currently assigned to the game session. The matchmaker data is in - // JSON syntax, formatted as a string. For more details, see Match Data (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-server.html#match-server-data). + // JSON syntax, formatted as a string. For more details, see Match Data (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-server.html#match-server-data). // // * LatencyInMs -\\- If the matchmaker uses player latency, include a latency // value, in milliseconds, for the Region that the game session is currently @@ -24803,9 +24822,6 @@ func (s *StartMatchBackfillInput) Validate() error { if s.ConfigurationName != nil && len(*s.ConfigurationName) < 1 { invalidParams.Add(request.NewErrParamMinLen("ConfigurationName", 1)) } - if s.GameSessionArn == nil { - invalidParams.Add(request.NewErrParamRequired("GameSessionArn")) - } if s.GameSessionArn != nil && len(*s.GameSessionArn) < 1 { invalidParams.Add(request.NewErrParamMinLen("GameSessionArn", 1)) } @@ -26845,17 +26861,21 @@ type UpdateMatchmakingConfigurationInput struct { // A flag that indicates whether a match that was created with this configuration // must be accepted by the matched players. To require acceptance, set to TRUE. + // With this option enabled, matchmaking tickets use the status REQUIRES_ACCEPTANCE + // to indicate when a completed potential match is waiting for player acceptance. AcceptanceRequired *bool `type:"boolean"` // The length of time (in seconds) to wait for players to accept a proposed - // match. If any player rejects the match or fails to accept before the timeout, - // the ticket continues to look for an acceptable match. 
+ // match, if acceptance is required. If any player rejects the match or fails + // to accept before the timeout, the tickets are returned to the ticket pool + // and continue to be evaluated for an acceptable match. AcceptanceTimeoutSeconds *int64 `min:"1" type:"integer"` // The number of player slots in a match to keep open for future players. For // example, assume that the configuration's rule set specifies a match for a // single 12-person team. If the additional player count is set to 2, only 10 - // players are initially selected for the match. + // players are initially selected for the match. This parameter is not used + // if FlexMatchMode is set to STANDALONE. AdditionalPlayerCount *int64 `type:"integer"` // The method that is used to backfill game sessions created with this matchmaking @@ -26863,7 +26883,8 @@ type UpdateMatchmakingConfigurationInput struct { // or does not use the match backfill feature. Specify AUTOMATIC to have GameLift // create a StartMatchBackfill request whenever a game session has one or more // open slots. Learn more about manual and automatic backfill in Backfill Existing - // Games with FlexMatch (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-backfill.html). + // Games with FlexMatch (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-backfill.html). + // Automatic backfill is not available when FlexMatchMode is set to STANDALONE. BackfillMode *string `type:"string" enum:"BackfillMode"` // Information to add to all events related to the matchmaking configuration. @@ -26872,25 +26893,39 @@ type UpdateMatchmakingConfigurationInput struct { // A descriptive label that is associated with matchmaking configuration. Description *string `min:"1" type:"string"` + // Indicates whether this matchmaking configuration is being used with GameLift + // hosting or as a standalone matchmaking solution. 
+ // + // * STANDALONE - FlexMatch forms matches and returns match information, + // including players and team assignments, in a MatchmakingSucceeded (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-events.html#match-events-matchmakingsucceeded) + // event. + // + // * WITH_QUEUE - FlexMatch forms matches and uses the specified GameLift + // queue to start a game session for the match. + FlexMatchMode *string `type:"string" enum:"FlexMatchMode"` + // A set of custom properties for a game session, formatted as key-value pairs. // These properties are passed to a game server process in the GameSession object // with a request to start a new game session (see Start a Game Session (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)). // This information is added to the new GameSession object that is created for - // a successful match. + // a successful match. This parameter is not used if FlexMatchMode is set to + // STANDALONE. GameProperties []*GameProperty `type:"list"` // A set of custom game session properties, formatted as a single string value. // This data is passed to a game server process in the GameSession object with // a request to start a new game session (see Start a Game Session (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)). // This information is added to the new GameSession object that is created for - // a successful match. + // a successful match. This parameter is not used if FlexMatchMode is set to + // STANDALONE. GameSessionData *string `min:"1" type:"string"` // Amazon Resource Name (ARN (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)) // that is assigned to a GameLift game session queue resource and uniquely identifies - // it. ARNs are unique across all Regions. 
These queues are used when placing - // game sessions for matches that are created with this matchmaking configuration. - // Queues can be located in any Region. + // it. ARNs are unique across all Regions. Queues can be located in any Region. + // Queues are used to start new GameLift-hosted game sessions for matches that + // are created with this matchmaking configuration. If FlexMatchMode is set + // to STANDALONE, do not set this parameter. GameSessionQueueArns []*string `type:"list"` // A unique identifier for a matchmaking configuration to update. You can use @@ -26900,7 +26935,7 @@ type UpdateMatchmakingConfigurationInput struct { Name *string `min:"1" type:"string" required:"true"` // An SNS topic ARN that is set up to receive matchmaking notifications. See - // Setting up Notifications for Matchmaking (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-notification.html) + // Setting up Notifications for Matchmaking (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-notification.html) // for more information. NotificationTarget *string `type:"string"` @@ -27002,6 +27037,12 @@ func (s *UpdateMatchmakingConfigurationInput) SetDescription(v string) *UpdateMa return s } +// SetFlexMatchMode sets the FlexMatchMode field's value. +func (s *UpdateMatchmakingConfigurationInput) SetFlexMatchMode(v string) *UpdateMatchmakingConfigurationInput { + s.FlexMatchMode = &v + return s +} + // SetGameProperties sets the GameProperties field's value. func (s *UpdateMatchmakingConfigurationInput) SetGameProperties(v []*GameProperty) *UpdateMatchmakingConfigurationInput { s.GameProperties = v @@ -27171,14 +27212,15 @@ type UpdateScriptInput struct { // ScriptId is a required field ScriptId *string `type:"string" required:"true"` - // The location of the Amazon S3 bucket where a zipped file containing your - // Realtime scripts is stored. 
The storage location must specify the Amazon - // S3 bucket name, the zip file name (the "key"), and a role ARN that allows - // Amazon GameLift to access the Amazon S3 storage location. The S3 bucket must - // be in the same Region where you want to create a new script. By default, + // The Amazon S3 location of your Realtime scripts. The storage location must + // specify the S3 bucket name, the zip file name (the "key"), and an IAM role + // ARN that allows Amazon GameLift to access the S3 storage location. The S3 + // bucket must be in the same Region as the script you're updating. By default, // Amazon GameLift uploads the latest version of the zip file; if you have S3 // object versioning turned on, you can use the ObjectVersion parameter to specify - // an earlier version. + // an earlier version. To call this operation with a storage location, you must + // have IAM PassRole permission. For more details on IAM roles and PassRole + // permissions, see Set up a role for GameLift access (https://docs.aws.amazon.com/gamelift/latest/developerguide/setting-up-role.html). StorageLocation *S3Location `type:"structure"` // The version that is associated with a build or script. 
Version strings do @@ -27757,6 +27799,30 @@ const ( // EC2InstanceTypeC524xlarge is a EC2InstanceType enum value EC2InstanceTypeC524xlarge = "c5.24xlarge" + // EC2InstanceTypeC5aLarge is a EC2InstanceType enum value + EC2InstanceTypeC5aLarge = "c5a.large" + + // EC2InstanceTypeC5aXlarge is a EC2InstanceType enum value + EC2InstanceTypeC5aXlarge = "c5a.xlarge" + + // EC2InstanceTypeC5a2xlarge is a EC2InstanceType enum value + EC2InstanceTypeC5a2xlarge = "c5a.2xlarge" + + // EC2InstanceTypeC5a4xlarge is a EC2InstanceType enum value + EC2InstanceTypeC5a4xlarge = "c5a.4xlarge" + + // EC2InstanceTypeC5a8xlarge is a EC2InstanceType enum value + EC2InstanceTypeC5a8xlarge = "c5a.8xlarge" + + // EC2InstanceTypeC5a12xlarge is a EC2InstanceType enum value + EC2InstanceTypeC5a12xlarge = "c5a.12xlarge" + + // EC2InstanceTypeC5a16xlarge is a EC2InstanceType enum value + EC2InstanceTypeC5a16xlarge = "c5a.16xlarge" + + // EC2InstanceTypeC5a24xlarge is a EC2InstanceType enum value + EC2InstanceTypeC5a24xlarge = "c5a.24xlarge" + // EC2InstanceTypeR3Large is a EC2InstanceType enum value EC2InstanceTypeR3Large = "r3.large" @@ -27814,6 +27880,30 @@ const ( // EC2InstanceTypeR524xlarge is a EC2InstanceType enum value EC2InstanceTypeR524xlarge = "r5.24xlarge" + // EC2InstanceTypeR5aLarge is a EC2InstanceType enum value + EC2InstanceTypeR5aLarge = "r5a.large" + + // EC2InstanceTypeR5aXlarge is a EC2InstanceType enum value + EC2InstanceTypeR5aXlarge = "r5a.xlarge" + + // EC2InstanceTypeR5a2xlarge is a EC2InstanceType enum value + EC2InstanceTypeR5a2xlarge = "r5a.2xlarge" + + // EC2InstanceTypeR5a4xlarge is a EC2InstanceType enum value + EC2InstanceTypeR5a4xlarge = "r5a.4xlarge" + + // EC2InstanceTypeR5a8xlarge is a EC2InstanceType enum value + EC2InstanceTypeR5a8xlarge = "r5a.8xlarge" + + // EC2InstanceTypeR5a12xlarge is a EC2InstanceType enum value + EC2InstanceTypeR5a12xlarge = "r5a.12xlarge" + + // EC2InstanceTypeR5a16xlarge is a EC2InstanceType enum value + EC2InstanceTypeR5a16xlarge = 
"r5a.16xlarge" + + // EC2InstanceTypeR5a24xlarge is a EC2InstanceType enum value + EC2InstanceTypeR5a24xlarge = "r5a.24xlarge" + // EC2InstanceTypeM3Medium is a EC2InstanceType enum value EC2InstanceTypeM3Medium = "m3.medium" @@ -27864,6 +27954,30 @@ const ( // EC2InstanceTypeM524xlarge is a EC2InstanceType enum value EC2InstanceTypeM524xlarge = "m5.24xlarge" + + // EC2InstanceTypeM5aLarge is a EC2InstanceType enum value + EC2InstanceTypeM5aLarge = "m5a.large" + + // EC2InstanceTypeM5aXlarge is a EC2InstanceType enum value + EC2InstanceTypeM5aXlarge = "m5a.xlarge" + + // EC2InstanceTypeM5a2xlarge is a EC2InstanceType enum value + EC2InstanceTypeM5a2xlarge = "m5a.2xlarge" + + // EC2InstanceTypeM5a4xlarge is a EC2InstanceType enum value + EC2InstanceTypeM5a4xlarge = "m5a.4xlarge" + + // EC2InstanceTypeM5a8xlarge is a EC2InstanceType enum value + EC2InstanceTypeM5a8xlarge = "m5a.8xlarge" + + // EC2InstanceTypeM5a12xlarge is a EC2InstanceType enum value + EC2InstanceTypeM5a12xlarge = "m5a.12xlarge" + + // EC2InstanceTypeM5a16xlarge is a EC2InstanceType enum value + EC2InstanceTypeM5a16xlarge = "m5a.16xlarge" + + // EC2InstanceTypeM5a24xlarge is a EC2InstanceType enum value + EC2InstanceTypeM5a24xlarge = "m5a.24xlarge" ) // EC2InstanceType_Values returns all elements of the EC2InstanceType enum @@ -27891,6 +28005,14 @@ func EC2InstanceType_Values() []string { EC2InstanceTypeC512xlarge, EC2InstanceTypeC518xlarge, EC2InstanceTypeC524xlarge, + EC2InstanceTypeC5aLarge, + EC2InstanceTypeC5aXlarge, + EC2InstanceTypeC5a2xlarge, + EC2InstanceTypeC5a4xlarge, + EC2InstanceTypeC5a8xlarge, + EC2InstanceTypeC5a12xlarge, + EC2InstanceTypeC5a16xlarge, + EC2InstanceTypeC5a24xlarge, EC2InstanceTypeR3Large, EC2InstanceTypeR3Xlarge, EC2InstanceTypeR32xlarge, @@ -27910,6 +28032,14 @@ func EC2InstanceType_Values() []string { EC2InstanceTypeR512xlarge, EC2InstanceTypeR516xlarge, EC2InstanceTypeR524xlarge, + EC2InstanceTypeR5aLarge, + EC2InstanceTypeR5aXlarge, + EC2InstanceTypeR5a2xlarge, + 
EC2InstanceTypeR5a4xlarge, + EC2InstanceTypeR5a8xlarge, + EC2InstanceTypeR5a12xlarge, + EC2InstanceTypeR5a16xlarge, + EC2InstanceTypeR5a24xlarge, EC2InstanceTypeM3Medium, EC2InstanceTypeM3Large, EC2InstanceTypeM3Xlarge, @@ -27927,6 +28057,14 @@ func EC2InstanceType_Values() []string { EC2InstanceTypeM512xlarge, EC2InstanceTypeM516xlarge, EC2InstanceTypeM524xlarge, + EC2InstanceTypeM5aLarge, + EC2InstanceTypeM5aXlarge, + EC2InstanceTypeM5a2xlarge, + EC2InstanceTypeM5a4xlarge, + EC2InstanceTypeM5a8xlarge, + EC2InstanceTypeM5a12xlarge, + EC2InstanceTypeM5a16xlarge, + EC2InstanceTypeM5a24xlarge, } } @@ -28142,6 +28280,22 @@ func FleetType_Values() []string { } } +const ( + // FlexMatchModeStandalone is a FlexMatchMode enum value + FlexMatchModeStandalone = "STANDALONE" + + // FlexMatchModeWithQueue is a FlexMatchMode enum value + FlexMatchModeWithQueue = "WITH_QUEUE" +) + +// FlexMatchMode_Values returns all elements of the FlexMatchMode enum +func FlexMatchMode_Values() []string { + return []string{ + FlexMatchModeStandalone, + FlexMatchModeWithQueue, + } +} + const ( // GameServerClaimStatusClaimed is a GameServerClaimStatus enum value GameServerClaimStatusClaimed = "CLAIMED" diff --git a/service/iotsitewise/api.go b/service/iotsitewise/api.go index a6500d4c8cb..5276377b726 100644 --- a/service/iotsitewise/api.go +++ b/service/iotsitewise/api.go @@ -1169,8 +1169,8 @@ func (c *IoTSiteWise) CreatePresignedPortalUrlRequest(input *CreatePresignedPort // Creates a pre-signed URL to a portal. Use this operation to create URLs to // portals that use AWS Identity and Access Management (IAM) to authenticate // users. An IAM user with access to a portal can call this API to get a URL -// to that portal. The URL contains a session token that lets the IAM user access -// the portal. +// to that portal. The URL contains an authentication token that lets the IAM +// user access the portal. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2511,6 +2511,100 @@ func (c *IoTSiteWise) DescribeDashboardWithContext(ctx aws.Context, input *Descr return out, req.Send() } +const opDescribeDefaultEncryptionConfiguration = "DescribeDefaultEncryptionConfiguration" + +// DescribeDefaultEncryptionConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDefaultEncryptionConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeDefaultEncryptionConfiguration for more information on using the DescribeDefaultEncryptionConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeDefaultEncryptionConfigurationRequest method. 
+// req, resp := client.DescribeDefaultEncryptionConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotsitewise-2019-12-02/DescribeDefaultEncryptionConfiguration +func (c *IoTSiteWise) DescribeDefaultEncryptionConfigurationRequest(input *DescribeDefaultEncryptionConfigurationInput) (req *request.Request, output *DescribeDefaultEncryptionConfigurationOutput) { + op := &request.Operation{ + Name: opDescribeDefaultEncryptionConfiguration, + HTTPMethod: "GET", + HTTPPath: "/configuration/account/encryption", + } + + if input == nil { + input = &DescribeDefaultEncryptionConfigurationInput{} + } + + output = &DescribeDefaultEncryptionConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeDefaultEncryptionConfiguration API operation for AWS IoT SiteWise. +// +// Retrieves information about the default encryption configuration for the +// AWS account in the default or specified region. For more information, see +// Key management (https://docs.aws.amazon.com/iot-sitewise/latest/userguide/key-management.html) +// in the AWS IoT SiteWise User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT SiteWise's +// API operation DescribeDefaultEncryptionConfiguration for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// The request isn't valid. This can occur if your request contains malformed +// JSON or unsupported characters. Check your request and try again. +// +// * InternalFailureException +// AWS IoT SiteWise can't process your request right now. Try again later. +// +// * ThrottlingException +// Your request exceeded a rate limit. 
For example, you might have exceeded +// the number of AWS IoT SiteWise assets that can be created per second, the +// allowed number of messages per second, and so on. +// +// For more information, see Quotas (https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) +// in the AWS IoT SiteWise User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotsitewise-2019-12-02/DescribeDefaultEncryptionConfiguration +func (c *IoTSiteWise) DescribeDefaultEncryptionConfiguration(input *DescribeDefaultEncryptionConfigurationInput) (*DescribeDefaultEncryptionConfigurationOutput, error) { + req, out := c.DescribeDefaultEncryptionConfigurationRequest(input) + return out, req.Send() +} + +// DescribeDefaultEncryptionConfigurationWithContext is the same as DescribeDefaultEncryptionConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeDefaultEncryptionConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoTSiteWise) DescribeDefaultEncryptionConfigurationWithContext(ctx aws.Context, input *DescribeDefaultEncryptionConfigurationInput, opts ...request.Option) (*DescribeDefaultEncryptionConfigurationOutput, error) { + req, out := c.DescribeDefaultEncryptionConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opDescribeGateway = "DescribeGateway" // DescribeGatewayRequest generates a "aws/request.Request" representing the @@ -5019,6 +5113,111 @@ func (c *IoTSiteWise) ListTagsForResourceWithContext(ctx aws.Context, input *Lis return out, req.Send() } +const opPutDefaultEncryptionConfiguration = "PutDefaultEncryptionConfiguration" + +// PutDefaultEncryptionConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutDefaultEncryptionConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutDefaultEncryptionConfiguration for more information on using the PutDefaultEncryptionConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutDefaultEncryptionConfigurationRequest method. 
+// req, resp := client.PutDefaultEncryptionConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotsitewise-2019-12-02/PutDefaultEncryptionConfiguration +func (c *IoTSiteWise) PutDefaultEncryptionConfigurationRequest(input *PutDefaultEncryptionConfigurationInput) (req *request.Request, output *PutDefaultEncryptionConfigurationOutput) { + op := &request.Operation{ + Name: opPutDefaultEncryptionConfiguration, + HTTPMethod: "POST", + HTTPPath: "/configuration/account/encryption", + } + + if input == nil { + input = &PutDefaultEncryptionConfigurationInput{} + } + + output = &PutDefaultEncryptionConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutDefaultEncryptionConfiguration API operation for AWS IoT SiteWise. +// +// Sets the default encryption configuration for the AWS account. For more information, +// see Key management (https://docs.aws.amazon.com/iot-sitewise/latest/userguide/key-management.html) +// in the AWS IoT SiteWise User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT SiteWise's +// API operation PutDefaultEncryptionConfiguration for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// The request isn't valid. This can occur if your request contains malformed +// JSON or unsupported characters. Check your request and try again. +// +// * InternalFailureException +// AWS IoT SiteWise can't process your request right now. Try again later. +// +// * ThrottlingException +// Your request exceeded a rate limit. 
For example, you might have exceeded +// the number of AWS IoT SiteWise assets that can be created per second, the +// allowed number of messages per second, and so on. +// +// For more information, see Quotas (https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) +// in the AWS IoT SiteWise User Guide. +// +// * LimitExceededException +// You've reached the limit for a resource. For example, this can occur if you're +// trying to associate more than the allowed number of child assets or attempting +// to create more than the allowed number of properties for an asset model. +// +// For more information, see Quotas (https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) +// in the AWS IoT SiteWise User Guide. +// +// * ConflictingOperationException +// Your request has conflicting operations. This can occur if you're trying +// to perform more than one operation on the same resource at the same time. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotsitewise-2019-12-02/PutDefaultEncryptionConfiguration +func (c *IoTSiteWise) PutDefaultEncryptionConfiguration(input *PutDefaultEncryptionConfigurationInput) (*PutDefaultEncryptionConfigurationOutput, error) { + req, out := c.PutDefaultEncryptionConfigurationRequest(input) + return out, req.Send() +} + +// PutDefaultEncryptionConfigurationWithContext is the same as PutDefaultEncryptionConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutDefaultEncryptionConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *IoTSiteWise) PutDefaultEncryptionConfigurationWithContext(ctx aws.Context, input *PutDefaultEncryptionConfigurationInput, opts ...request.Option) (*PutDefaultEncryptionConfigurationOutput, error) { + req, out := c.PutDefaultEncryptionConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opPutLoggingOptions = "PutLoggingOptions" // PutLoggingOptionsRequest generates a "aws/request.Request" representing the @@ -7902,6 +8101,69 @@ func (s *BatchPutAssetPropertyValueOutput) SetErrorEntries(v []*BatchPutAssetPro return s } +type ConfigurationErrorDetails struct { + _ struct{} `type:"structure"` + + // Code is a required field + Code *string `locationName:"code" type:"string" required:"true" enum:"ErrorCode"` + + // Message is a required field + Message *string `locationName:"message" type:"string" required:"true"` +} + +// String returns the string representation +func (s ConfigurationErrorDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfigurationErrorDetails) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *ConfigurationErrorDetails) SetCode(v string) *ConfigurationErrorDetails { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. 
+func (s *ConfigurationErrorDetails) SetMessage(v string) *ConfigurationErrorDetails { + s.Message = &v + return s +} + +type ConfigurationStatus struct { + _ struct{} `type:"structure"` + + Error *ConfigurationErrorDetails `locationName:"error" type:"structure"` + + // State is a required field + State *string `locationName:"state" type:"string" required:"true" enum:"ConfigurationState"` +} + +// String returns the string representation +func (s ConfigurationStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfigurationStatus) GoString() string { + return s.String() +} + +// SetError sets the Error field's value. +func (s *ConfigurationStatus) SetError(v *ConfigurationErrorDetails) *ConfigurationStatus { + s.Error = v + return s +} + +// SetState sets the State field's value. +func (s *ConfigurationStatus) SetState(v string) *ConfigurationStatus { + s.State = &v + return s +} + // Your request has conflicting operations. This can occur if you're trying // to perform more than one operation on the same resource at the same time. type ConflictingOperationException struct { @@ -8951,7 +9213,7 @@ type CreatePresignedPortalUrlInput struct { // The duration (in seconds) for which the session at the URL is valid. // - // Default: 900 seconds (15 minutes) + // Default: 43,200 seconds (12 hours) SessionDurationSeconds *int64 `location:"querystring" locationName:"sessionDurationSeconds" min:"900" type:"integer"` } @@ -8999,10 +9261,10 @@ func (s *CreatePresignedPortalUrlInput) SetSessionDurationSeconds(v int64) *Crea type CreatePresignedPortalUrlOutput struct { _ struct{} `type:"structure"` - // The pre-signed URL to the portal. The URL contains the portal ID and a session + // The pre-signed URL to the portal. The URL contains the portal ID and an authentication // token that lets you access the portal. The URL has the following format. 
// - // https://.app.iotsitewise.aws/auth?token= + // https://.app.iotsitewise.aws/iam?token= // // PresignedPortalUrl is a required field PresignedPortalUrl *string `locationName:"presignedPortalUrl" min:"1" type:"string" required:"true"` @@ -10448,6 +10710,67 @@ func (s *DescribeDashboardOutput) SetProjectId(v string) *DescribeDashboardOutpu return s } +type DescribeDefaultEncryptionConfigurationInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeDefaultEncryptionConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDefaultEncryptionConfigurationInput) GoString() string { + return s.String() +} + +type DescribeDefaultEncryptionConfigurationOutput struct { + _ struct{} `type:"structure"` + + // The status of the account configuration. This contains the ConfigurationState. + // If there's an error, it also contains the ErrorDetails. + // + // ConfigurationStatus is a required field + ConfigurationStatus *ConfigurationStatus `locationName:"configurationStatus" type:"structure" required:"true"` + + // The type of encryption used for the encryption configuration. + // + // EncryptionType is a required field + EncryptionType *string `locationName:"encryptionType" type:"string" required:"true" enum:"EncryptionType"` + + // The key ARN of the customer managed customer master key (CMK) used for AWS + // KMS encryption if you use KMS_BASED_ENCRYPTION. + KmsKeyArn *string `locationName:"kmsKeyArn" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeDefaultEncryptionConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDefaultEncryptionConfigurationOutput) GoString() string { + return s.String() +} + +// SetConfigurationStatus sets the ConfigurationStatus field's value. 
+func (s *DescribeDefaultEncryptionConfigurationOutput) SetConfigurationStatus(v *ConfigurationStatus) *DescribeDefaultEncryptionConfigurationOutput { + s.ConfigurationStatus = v + return s +} + +// SetEncryptionType sets the EncryptionType field's value. +func (s *DescribeDefaultEncryptionConfigurationOutput) SetEncryptionType(v string) *DescribeDefaultEncryptionConfigurationOutput { + s.EncryptionType = &v + return s +} + +// SetKmsKeyArn sets the KmsKeyArn field's value. +func (s *DescribeDefaultEncryptionConfigurationOutput) SetKmsKeyArn(v string) *DescribeDefaultEncryptionConfigurationOutput { + s.KmsKeyArn = &v + return s +} + type DescribeGatewayCapabilityConfigurationInput struct { _ struct{} `type:"structure"` @@ -14328,6 +14651,103 @@ func (s *PutAssetPropertyValueEntry) SetPropertyValues(v []*AssetPropertyValue) return s } +type PutDefaultEncryptionConfigurationInput struct { + _ struct{} `type:"structure"` + + // The type of encryption used for the encryption configuration. + // + // EncryptionType is a required field + EncryptionType *string `locationName:"encryptionType" type:"string" required:"true" enum:"EncryptionType"` + + // The Key ID of the customer managed customer master key (CMK) used for AWS + // KMS encryption. This is required if you use KMS_BASED_ENCRYPTION. + KmsKeyId *string `locationName:"kmsKeyId" min:"1" type:"string"` +} + +// String returns the string representation +func (s PutDefaultEncryptionConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutDefaultEncryptionConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutDefaultEncryptionConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutDefaultEncryptionConfigurationInput"} + if s.EncryptionType == nil { + invalidParams.Add(request.NewErrParamRequired("EncryptionType")) + } + if s.KmsKeyId != nil && len(*s.KmsKeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KmsKeyId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEncryptionType sets the EncryptionType field's value. +func (s *PutDefaultEncryptionConfigurationInput) SetEncryptionType(v string) *PutDefaultEncryptionConfigurationInput { + s.EncryptionType = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *PutDefaultEncryptionConfigurationInput) SetKmsKeyId(v string) *PutDefaultEncryptionConfigurationInput { + s.KmsKeyId = &v + return s +} + +type PutDefaultEncryptionConfigurationOutput struct { + _ struct{} `type:"structure"` + + // The status of the account configuration. This contains the ConfigurationState. + // If there is an error, it also contains the ErrorDetails. + // + // ConfigurationStatus is a required field + ConfigurationStatus *ConfigurationStatus `locationName:"configurationStatus" type:"structure" required:"true"` + + // The type of encryption used for the encryption configuration. + // + // EncryptionType is a required field + EncryptionType *string `locationName:"encryptionType" type:"string" required:"true" enum:"EncryptionType"` + + // The Key ARN of the AWS KMS CMK used for AWS KMS encryption if you use KMS_BASED_ENCRYPTION. 
+ KmsKeyArn *string `locationName:"kmsKeyArn" min:"1" type:"string"` +} + +// String returns the string representation +func (s PutDefaultEncryptionConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutDefaultEncryptionConfigurationOutput) GoString() string { + return s.String() +} + +// SetConfigurationStatus sets the ConfigurationStatus field's value. +func (s *PutDefaultEncryptionConfigurationOutput) SetConfigurationStatus(v *ConfigurationStatus) *PutDefaultEncryptionConfigurationOutput { + s.ConfigurationStatus = v + return s +} + +// SetEncryptionType sets the EncryptionType field's value. +func (s *PutDefaultEncryptionConfigurationOutput) SetEncryptionType(v string) *PutDefaultEncryptionConfigurationOutput { + s.EncryptionType = &v + return s +} + +// SetKmsKeyArn sets the KmsKeyArn field's value. +func (s *PutDefaultEncryptionConfigurationOutput) SetKmsKeyArn(v string) *PutDefaultEncryptionConfigurationOutput { + s.KmsKeyArn = &v + return s +} + type PutLoggingOptionsInput struct { _ struct{} `type:"structure"` @@ -16497,6 +16917,42 @@ func CapabilitySyncStatus_Values() []string { } } +const ( + // ConfigurationStateActive is a ConfigurationState enum value + ConfigurationStateActive = "ACTIVE" + + // ConfigurationStateUpdateInProgress is a ConfigurationState enum value + ConfigurationStateUpdateInProgress = "UPDATE_IN_PROGRESS" + + // ConfigurationStateUpdateFailed is a ConfigurationState enum value + ConfigurationStateUpdateFailed = "UPDATE_FAILED" +) + +// ConfigurationState_Values returns all elements of the ConfigurationState enum +func ConfigurationState_Values() []string { + return []string{ + ConfigurationStateActive, + ConfigurationStateUpdateInProgress, + ConfigurationStateUpdateFailed, + } +} + +const ( + // EncryptionTypeSitewiseDefaultEncryption is a EncryptionType enum value + EncryptionTypeSitewiseDefaultEncryption = "SITEWISE_DEFAULT_ENCRYPTION" + + // 
EncryptionTypeKmsBasedEncryption is a EncryptionType enum value + EncryptionTypeKmsBasedEncryption = "KMS_BASED_ENCRYPTION" +) + +// EncryptionType_Values returns all elements of the EncryptionType enum +func EncryptionType_Values() []string { + return []string{ + EncryptionTypeSitewiseDefaultEncryption, + EncryptionTypeKmsBasedEncryption, + } +} + const ( // ErrorCodeValidationError is a ErrorCode enum value ErrorCodeValidationError = "VALIDATION_ERROR" diff --git a/service/iotsitewise/iotsitewiseiface/interface.go b/service/iotsitewise/iotsitewiseiface/interface.go index 7266887c92c..63aec52d293 100644 --- a/service/iotsitewise/iotsitewiseiface/interface.go +++ b/service/iotsitewise/iotsitewiseiface/interface.go @@ -156,6 +156,10 @@ type IoTSiteWiseAPI interface { DescribeDashboardWithContext(aws.Context, *iotsitewise.DescribeDashboardInput, ...request.Option) (*iotsitewise.DescribeDashboardOutput, error) DescribeDashboardRequest(*iotsitewise.DescribeDashboardInput) (*request.Request, *iotsitewise.DescribeDashboardOutput) + DescribeDefaultEncryptionConfiguration(*iotsitewise.DescribeDefaultEncryptionConfigurationInput) (*iotsitewise.DescribeDefaultEncryptionConfigurationOutput, error) + DescribeDefaultEncryptionConfigurationWithContext(aws.Context, *iotsitewise.DescribeDefaultEncryptionConfigurationInput, ...request.Option) (*iotsitewise.DescribeDefaultEncryptionConfigurationOutput, error) + DescribeDefaultEncryptionConfigurationRequest(*iotsitewise.DescribeDefaultEncryptionConfigurationInput) (*request.Request, *iotsitewise.DescribeDefaultEncryptionConfigurationOutput) + DescribeGateway(*iotsitewise.DescribeGatewayInput) (*iotsitewise.DescribeGatewayOutput, error) DescribeGatewayWithContext(aws.Context, *iotsitewise.DescribeGatewayInput, ...request.Option) (*iotsitewise.DescribeGatewayOutput, error) DescribeGatewayRequest(*iotsitewise.DescribeGatewayInput) (*request.Request, *iotsitewise.DescribeGatewayOutput) @@ -265,6 +269,10 @@ type IoTSiteWiseAPI interface { 
ListTagsForResourceWithContext(aws.Context, *iotsitewise.ListTagsForResourceInput, ...request.Option) (*iotsitewise.ListTagsForResourceOutput, error) ListTagsForResourceRequest(*iotsitewise.ListTagsForResourceInput) (*request.Request, *iotsitewise.ListTagsForResourceOutput) + PutDefaultEncryptionConfiguration(*iotsitewise.PutDefaultEncryptionConfigurationInput) (*iotsitewise.PutDefaultEncryptionConfigurationOutput, error) + PutDefaultEncryptionConfigurationWithContext(aws.Context, *iotsitewise.PutDefaultEncryptionConfigurationInput, ...request.Option) (*iotsitewise.PutDefaultEncryptionConfigurationOutput, error) + PutDefaultEncryptionConfigurationRequest(*iotsitewise.PutDefaultEncryptionConfigurationInput) (*request.Request, *iotsitewise.PutDefaultEncryptionConfigurationOutput) + PutLoggingOptions(*iotsitewise.PutLoggingOptionsInput) (*iotsitewise.PutLoggingOptionsOutput, error) PutLoggingOptionsWithContext(aws.Context, *iotsitewise.PutLoggingOptionsInput, ...request.Option) (*iotsitewise.PutLoggingOptionsOutput, error) PutLoggingOptionsRequest(*iotsitewise.PutLoggingOptionsInput) (*request.Request, *iotsitewise.PutLoggingOptionsOutput) diff --git a/service/lexmodelbuildingservice/api.go b/service/lexmodelbuildingservice/api.go index b7ee2293125..86cde3d2917 100644 --- a/service/lexmodelbuildingservice/api.go +++ b/service/lexmodelbuildingservice/api.go @@ -13083,6 +13083,9 @@ const ( // LocaleEnUs is a Locale enum value LocaleEnUs = "en-US" + // LocaleEs419 is a Locale enum value + LocaleEs419 = "es-419" + // LocaleEsEs is a Locale enum value LocaleEsEs = "es-ES" @@ -13106,6 +13109,7 @@ func Locale_Values() []string { LocaleEnAu, LocaleEnGb, LocaleEnUs, + LocaleEs419, LocaleEsEs, LocaleEsUs, LocaleFrFr, diff --git a/service/mediaconvert/api.go b/service/mediaconvert/api.go index b8bca092fc9..3446d6e24d0 100644 --- a/service/mediaconvert/api.go +++ b/service/mediaconvert/api.go @@ -5387,8 +5387,10 @@ type CmafGroupSettings struct { // than the manifest file. 
BaseUrl *string `locationName:"baseUrl" type:"string"` - // When set to ENABLED, sets #EXT-X-ALLOW-CACHE:no tag, which prevents client - // from saving media segments for later replay. + // Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no + // tag. Otherwise, keep the default value Enabled (ENABLED) and control caching + // in your video distribution set up. For example, use the Cache-Control http + // header. ClientCache *string `locationName:"clientCache" type:"string" enum:"CmafClientCache"` // Specification to use (RFC-6381 or the default RFC-4281) during m3u8 playlist @@ -5642,6 +5644,21 @@ func (s *CmafGroupSettings) SetWriteSegmentTimelineInRepresentation(v string) *C type CmfcSettings struct { _ struct{} `type:"structure"` + // Specify this setting only when your output will be consumed by a downstream + // repackaging workflow that is sensitive to very small duration differences + // between video and audio. For this situation, choose Match video duration + // (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default + // codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, + // MediaConvert pads the output audio streams with silence or trims them to + // ensure that the total duration of each audio stream is at least as long as + // the total duration of the video stream. After padding or trimming, the audio + // stream duration is no more than one frame longer than the video stream. MediaConvert + // applies audio padding or trimming only to the end of the last segment of + // the output. For unsegmented outputs, MediaConvert adds padding only to the + // end of the file. When you keep the default value, any minor discrepancies + // between audio and video duration will depend on your output audio codec. + AudioDuration *string `locationName:"audioDuration" type:"string" enum:"CmfcAudioDuration"` + // Use this setting only when you specify SCTE-35 markers from ESAM. 
Choose // INSERT to put SCTE-35 markers in this output at the insertion points that // you specify in an ESAM XML document. Provide the document in the setting @@ -5665,6 +5682,12 @@ func (s CmfcSettings) GoString() string { return s.String() } +// SetAudioDuration sets the AudioDuration field's value. +func (s *CmfcSettings) SetAudioDuration(v string) *CmfcSettings { + s.AudioDuration = &v + return s +} + // SetScte35Esam sets the Scte35Esam field's value. func (s *CmfcSettings) SetScte35Esam(v string) *CmfcSettings { s.Scte35Esam = &v @@ -6035,11 +6058,15 @@ type CreateJobInput struct { StatusUpdateInterval *string `locationName:"statusUpdateInterval" type:"string" enum:"StatusUpdateInterval"` // Optional. The tags that you want to add to the resource. You can tag resources - // with a key-value pair or with only a key. + // with a key-value pair or with only a key. Use standard AWS tags on your job + // for automatic integration with AWS services and for custom integrations and + // workflows. Tags map[string]*string `locationName:"tags" type:"map"` // Optional. User-defined metadata that you want to associate with an MediaConvert - // job. You specify metadata in key/value pairs. + // job. You specify metadata in key/value pairs. Use only for existing integrations + // or workflows that rely on job metadata tags. Otherwise, we recommend that + // you use standard AWS tags. UserMetadata map[string]*string `locationName:"userMetadata" type:"map"` } @@ -6765,6 +6792,19 @@ type DashIsoGroupSettings struct { // playout. MinBufferTime *int64 `locationName:"minBufferTime" type:"integer"` + // Keep this setting at the default value of 0, unless you are troubleshooting + // a problem with how devices play back the end of your video asset. If you + // know that player devices are hanging on the final segment of your video because + // the length of your final segment is too short, use this setting to specify + // a minimum final segment length, in seconds. 
Choose a value that is greater + // than or equal to 1 and less than your segment length. When you specify a + // value for this setting, the encoder will combine any final segment that is + // shorter than the length that you specify with the previous segment. For example, + // your segment length is 3 seconds and your final segment is .5 seconds without + // a minimum final segment length; when you set the minimum final segment length + // to 1, your final segment is 3.5 seconds. + MinFinalSegmentLength *float64 `locationName:"minFinalSegmentLength" type:"double"` + // Specify whether your DASH profile is on-demand or main. When you choose Main // profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011 // in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE), @@ -6879,6 +6919,12 @@ func (s *DashIsoGroupSettings) SetMinBufferTime(v int64) *DashIsoGroupSettings { return s } +// SetMinFinalSegmentLength sets the MinFinalSegmentLength field's value. +func (s *DashIsoGroupSettings) SetMinFinalSegmentLength(v float64) *DashIsoGroupSettings { + s.MinFinalSegmentLength = &v + return s +} + // SetMpdProfile sets the MpdProfile field's value. func (s *DashIsoGroupSettings) SetMpdProfile(v string) *DashIsoGroupSettings { s.MpdProfile = &v @@ -10854,8 +10900,10 @@ type HlsGroupSettings struct { // line from the manifest. CaptionLanguageSetting *string `locationName:"captionLanguageSetting" type:"string" enum:"HlsCaptionLanguageSetting"` - // When set to ENABLED, sets #EXT-X-ALLOW-CACHE:no tag, which prevents client - // from saving media segments for later replay. + // Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no + // tag. Otherwise, keep the default value Enabled (ENABLED) and control caching + // in your video distribution set up. For example, use the Cache-Control http + // header. 
ClientCache *string `locationName:"clientCache" type:"string" enum:"HlsClientCache"` // Specification to use (RFC-6381 or the default RFC-4281) during m3u8 playlist @@ -13767,6 +13815,21 @@ type M2tsSettings struct { // Selects between the DVB and ATSC buffer models for Dolby Digital audio. AudioBufferModel *string `locationName:"audioBufferModel" type:"string" enum:"M2tsAudioBufferModel"` + // Specify this setting only when your output will be consumed by a downstream + // repackaging workflow that is sensitive to very small duration differences + // between video and audio. For this situation, choose Match video duration + // (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default + // codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, + // MediaConvert pads the output audio streams with silence or trims them to + // ensure that the total duration of each audio stream is at least as long as + // the total duration of the video stream. After padding or trimming, the audio + // stream duration is no more than one frame longer than the video stream. MediaConvert + // applies audio padding or trimming only to the end of the last segment of + // the output. For unsegmented outputs, MediaConvert adds padding only to the + // end of the file. When you keep the default value, any minor discrepancies + // between audio and video duration will depend on your output audio codec. + AudioDuration *string `locationName:"audioDuration" type:"string" enum:"M2tsAudioDuration"` + // The number of audio frames to insert for each PES packet. AudioFramesPerPes *int64 `locationName:"audioFramesPerPes" type:"integer"` @@ -14011,6 +14074,12 @@ func (s *M2tsSettings) SetAudioBufferModel(v string) *M2tsSettings { return s } +// SetAudioDuration sets the AudioDuration field's value. 
+func (s *M2tsSettings) SetAudioDuration(v string) *M2tsSettings { + s.AudioDuration = &v + return s +} + // SetAudioFramesPerPes sets the AudioFramesPerPes field's value. func (s *M2tsSettings) SetAudioFramesPerPes(v int64) *M2tsSettings { s.AudioFramesPerPes = &v @@ -14225,6 +14294,21 @@ func (s *M2tsSettings) SetVideoPid(v int64) *M2tsSettings { type M3u8Settings struct { _ struct{} `type:"structure"` + // Specify this setting only when your output will be consumed by a downstream + // repackaging workflow that is sensitive to very small duration differences + // between video and audio. For this situation, choose Match video duration + // (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default + // codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, + // MediaConvert pads the output audio streams with silence or trims them to + // ensure that the total duration of each audio stream is at least as long as + // the total duration of the video stream. After padding or trimming, the audio + // stream duration is no more than one frame longer than the video stream. MediaConvert + // applies audio padding or trimming only to the end of the last segment of + // the output. For unsegmented outputs, MediaConvert adds padding only to the + // end of the file. When you keep the default value, any minor discrepancies + // between audio and video duration will depend on your output audio codec. + AudioDuration *string `locationName:"audioDuration" type:"string" enum:"M3u8AudioDuration"` + // The number of audio frames to insert for each PES packet. AudioFramesPerPes *int64 `locationName:"audioFramesPerPes" type:"integer"` @@ -14330,6 +14414,12 @@ func (s *M3u8Settings) Validate() error { return nil } +// SetAudioDuration sets the AudioDuration field's value. +func (s *M3u8Settings) SetAudioDuration(v string) *M3u8Settings { + s.AudioDuration = &v + return s +} + // SetAudioFramesPerPes sets the AudioFramesPerPes field's value. 
func (s *M3u8Settings) SetAudioFramesPerPes(v int64) *M3u8Settings { s.AudioFramesPerPes = &v @@ -14859,6 +14949,21 @@ func (s *Mp3Settings) SetVbrQuality(v int64) *Mp3Settings { type Mp4Settings struct { _ struct{} `type:"structure"` + // Specify this setting only when your output will be consumed by a downstream + // repackaging workflow that is sensitive to very small duration differences + // between video and audio. For this situation, choose Match video duration + // (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default + // codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, + // MediaConvert pads the output audio streams with silence or trims them to + // ensure that the total duration of each audio stream is at least as long as + // the total duration of the video stream. After padding or trimming, the audio + // stream duration is no more than one frame longer than the video stream. MediaConvert + // applies audio padding or trimming only to the end of the last segment of + // the output. For unsegmented outputs, MediaConvert adds padding only to the + // end of the file. When you keep the default value, any minor discrepancies + // between audio and video duration will depend on your output audio codec. + AudioDuration *string `locationName:"audioDuration" type:"string" enum:"CmfcAudioDuration"` + // When enabled, file composition times will start at zero, composition times // in the 'ctts' (composition time to sample) box for B-frames will be negative, // and a 'cslg' (composition shift least greatest) box will be included per @@ -14896,6 +15001,12 @@ func (s Mp4Settings) GoString() string { return s.String() } +// SetAudioDuration sets the AudioDuration field's value. +func (s *Mp4Settings) SetAudioDuration(v string) *Mp4Settings { + s.AudioDuration = &v + return s +} + // SetCslgAtom sets the CslgAtom field's value. 
func (s *Mp4Settings) SetCslgAtom(v string) *Mp4Settings { s.CslgAtom = &v @@ -14930,6 +15041,29 @@ func (s *Mp4Settings) SetMp4MajorBrand(v string) *Mp4Settings { type MpdSettings struct { _ struct{} `type:"structure"` + // Optional. Choose Include (INCLUDE) to have MediaConvert mark up your DASH + // manifest with elements for embedded 608 captions. This markup isn't generally + // required, but some video players require it to discover and play embedded + // 608 captions. Keep the default value, Exclude (EXCLUDE), to leave these elements + // out. When you enable this setting, this is the markup that MediaConvert includes + // in your manifest: + AccessibilityCaptionHints *string `locationName:"accessibilityCaptionHints" type:"string" enum:"MpdAccessibilityCaptionHints"` + + // Specify this setting only when your output will be consumed by a downstream + // repackaging workflow that is sensitive to very small duration differences + // between video and audio. For this situation, choose Match video duration + // (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default + // codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, + // MediaConvert pads the output audio streams with silence or trims them to + // ensure that the total duration of each audio stream is at least as long as + // the total duration of the video stream. After padding or trimming, the audio + // stream duration is no more than one frame longer than the video stream. MediaConvert + // applies audio padding or trimming only to the end of the last segment of + // the output. For unsegmented outputs, MediaConvert adds padding only to the + // end of the file. When you keep the default value, any minor discrepancies + // between audio and video duration will depend on your output audio codec. 
+ AudioDuration *string `locationName:"audioDuration" type:"string" enum:"MpdAudioDuration"` + // Use this setting only in DASH output groups that include sidecar TTML or // IMSC captions. You specify sidecar captions in a separate output from your // audio and video. Choose Raw (RAW) for captions in a single XML file in a @@ -14961,6 +15095,18 @@ func (s MpdSettings) GoString() string { return s.String() } +// SetAccessibilityCaptionHints sets the AccessibilityCaptionHints field's value. +func (s *MpdSettings) SetAccessibilityCaptionHints(v string) *MpdSettings { + s.AccessibilityCaptionHints = &v + return s +} + +// SetAudioDuration sets the AudioDuration field's value. +func (s *MpdSettings) SetAudioDuration(v string) *MpdSettings { + s.AudioDuration = &v + return s +} + // SetCaptionContainerType sets the CaptionContainerType field's value. func (s *MpdSettings) SetCaptionContainerType(v string) *MpdSettings { s.CaptionContainerType = &v @@ -21814,8 +21960,10 @@ func CaptionSourceType_Values() []string { } } -// When set to ENABLED, sets #EXT-X-ALLOW-CACHE:no tag, which prevents client -// from saving media segments for later replay. +// Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no +// tag. Otherwise, keep the default value Enabled (ENABLED) and control caching +// in your video distribution set up. For example, use the Cache-Control http +// header. const ( // CmafClientCacheDisabled is a CmafClientCache enum value CmafClientCacheDisabled = "DISABLED" @@ -22054,6 +22202,35 @@ func CmafWriteSegmentTimelineInRepresentation_Values() []string { } } +// Specify this setting only when your output will be consumed by a downstream +// repackaging workflow that is sensitive to very small duration differences +// between video and audio. For this situation, choose Match video duration +// (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default +// codec duration (DEFAULT_CODEC_DURATION). 
When you choose Match video duration, +// MediaConvert pads the output audio streams with silence or trims them to +// ensure that the total duration of each audio stream is at least as long as +// the total duration of the video stream. After padding or trimming, the audio +// stream duration is no more than one frame longer than the video stream. MediaConvert +// applies audio padding or trimming only to the end of the last segment of +// the output. For unsegmented outputs, MediaConvert adds padding only to the +// end of the file. When you keep the default value, any minor discrepancies +// between audio and video duration will depend on your output audio codec. +const ( + // CmfcAudioDurationDefaultCodecDuration is a CmfcAudioDuration enum value + CmfcAudioDurationDefaultCodecDuration = "DEFAULT_CODEC_DURATION" + + // CmfcAudioDurationMatchVideoDuration is a CmfcAudioDuration enum value + CmfcAudioDurationMatchVideoDuration = "MATCH_VIDEO_DURATION" +) + +// CmfcAudioDuration_Values returns all elements of the CmfcAudioDuration enum +func CmfcAudioDuration_Values() []string { + return []string{ + CmfcAudioDurationDefaultCodecDuration, + CmfcAudioDurationMatchVideoDuration, + } +} + // Use this setting only when you specify SCTE-35 markers from ESAM. Choose // INSERT to put SCTE-35 markers in this output at the insertion points that // you specify in an ESAM XML document. Provide the document in the setting @@ -24761,8 +24938,10 @@ func HlsCaptionLanguageSetting_Values() []string { } } -// When set to ENABLED, sets #EXT-X-ALLOW-CACHE:no tag, which prevents client -// from saving media segments for later replay. +// Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no +// tag. Otherwise, keep the default value Enabled (ENABLED) and control caching +// in your video distribution set up. For example, use the Cache-Control http +// header. 
const ( // HlsClientCacheDisabled is a HlsClientCache enum value HlsClientCacheDisabled = "DISABLED" @@ -26093,6 +26272,35 @@ func M2tsAudioBufferModel_Values() []string { } } +// Specify this setting only when your output will be consumed by a downstream +// repackaging workflow that is sensitive to very small duration differences +// between video and audio. For this situation, choose Match video duration +// (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default +// codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, +// MediaConvert pads the output audio streams with silence or trims them to +// ensure that the total duration of each audio stream is at least as long as +// the total duration of the video stream. After padding or trimming, the audio +// stream duration is no more than one frame longer than the video stream. MediaConvert +// applies audio padding or trimming only to the end of the last segment of +// the output. For unsegmented outputs, MediaConvert adds padding only to the +// end of the file. When you keep the default value, any minor discrepancies +// between audio and video duration will depend on your output audio codec. +const ( + // M2tsAudioDurationDefaultCodecDuration is a M2tsAudioDuration enum value + M2tsAudioDurationDefaultCodecDuration = "DEFAULT_CODEC_DURATION" + + // M2tsAudioDurationMatchVideoDuration is a M2tsAudioDuration enum value + M2tsAudioDurationMatchVideoDuration = "MATCH_VIDEO_DURATION" +) + +// M2tsAudioDuration_Values returns all elements of the M2tsAudioDuration enum +func M2tsAudioDuration_Values() []string { + return []string{ + M2tsAudioDurationDefaultCodecDuration, + M2tsAudioDurationMatchVideoDuration, + } +} + // Controls what buffer model to use for accurate interleaving. If set to MULTIPLEX, // use multiplex buffer model. If set to NONE, this can lead to lower latency, // but low-memory devices may not be able to play back the stream without interruptions. 
@@ -26334,6 +26542,35 @@ func M2tsSegmentationStyle_Values() []string { } } +// Specify this setting only when your output will be consumed by a downstream +// repackaging workflow that is sensitive to very small duration differences +// between video and audio. For this situation, choose Match video duration +// (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default +// codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, +// MediaConvert pads the output audio streams with silence or trims them to +// ensure that the total duration of each audio stream is at least as long as +// the total duration of the video stream. After padding or trimming, the audio +// stream duration is no more than one frame longer than the video stream. MediaConvert +// applies audio padding or trimming only to the end of the last segment of +// the output. For unsegmented outputs, MediaConvert adds padding only to the +// end of the file. When you keep the default value, any minor discrepancies +// between audio and video duration will depend on your output audio codec. +const ( + // M3u8AudioDurationDefaultCodecDuration is a M3u8AudioDuration enum value + M3u8AudioDurationDefaultCodecDuration = "DEFAULT_CODEC_DURATION" + + // M3u8AudioDurationMatchVideoDuration is a M3u8AudioDuration enum value + M3u8AudioDurationMatchVideoDuration = "MATCH_VIDEO_DURATION" +) + +// M3u8AudioDuration_Values returns all elements of the M3u8AudioDuration enum +func M3u8AudioDuration_Values() []string { + return []string{ + M3u8AudioDurationDefaultCodecDuration, + M3u8AudioDurationMatchVideoDuration, + } +} + // If INSERT, Nielsen inaudible tones for media tracking will be detected in // the input audio and an equivalent ID3 tag will be inserted in the output. const ( @@ -26600,6 +26837,57 @@ func Mp4MoovPlacement_Values() []string { } } +// Optional. 
Choose Include (INCLUDE) to have MediaConvert mark up your DASH +// manifest with elements for embedded 608 captions. This markup isn't generally +// required, but some video players require it to discover and play embedded +// 608 captions. Keep the default value, Exclude (EXCLUDE), to leave these elements +// out. When you enable this setting, this is the markup that MediaConvert includes +// in your manifest: +const ( + // MpdAccessibilityCaptionHintsInclude is a MpdAccessibilityCaptionHints enum value + MpdAccessibilityCaptionHintsInclude = "INCLUDE" + + // MpdAccessibilityCaptionHintsExclude is a MpdAccessibilityCaptionHints enum value + MpdAccessibilityCaptionHintsExclude = "EXCLUDE" +) + +// MpdAccessibilityCaptionHints_Values returns all elements of the MpdAccessibilityCaptionHints enum +func MpdAccessibilityCaptionHints_Values() []string { + return []string{ + MpdAccessibilityCaptionHintsInclude, + MpdAccessibilityCaptionHintsExclude, + } +} + +// Specify this setting only when your output will be consumed by a downstream +// repackaging workflow that is sensitive to very small duration differences +// between video and audio. For this situation, choose Match video duration +// (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default +// codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, +// MediaConvert pads the output audio streams with silence or trims them to +// ensure that the total duration of each audio stream is at least as long as +// the total duration of the video stream. After padding or trimming, the audio +// stream duration is no more than one frame longer than the video stream. MediaConvert +// applies audio padding or trimming only to the end of the last segment of +// the output. For unsegmented outputs, MediaConvert adds padding only to the +// end of the file. 
When you keep the default value, any minor discrepancies +// between audio and video duration will depend on your output audio codec. +const ( + // MpdAudioDurationDefaultCodecDuration is a MpdAudioDuration enum value + MpdAudioDurationDefaultCodecDuration = "DEFAULT_CODEC_DURATION" + + // MpdAudioDurationMatchVideoDuration is a MpdAudioDuration enum value + MpdAudioDurationMatchVideoDuration = "MATCH_VIDEO_DURATION" +) + +// MpdAudioDuration_Values returns all elements of the MpdAudioDuration enum +func MpdAudioDuration_Values() []string { + return []string{ + MpdAudioDurationDefaultCodecDuration, + MpdAudioDurationMatchVideoDuration, + } +} + // Use this setting only in DASH output groups that include sidecar TTML or // IMSC captions. You specify sidecar captions in a separate output from your // audio and video. Choose Raw (RAW) for captions in a single XML file in a diff --git a/service/mwaa/api.go b/service/mwaa/api.go new file mode 100644 index 00000000000..7233ec19009 --- /dev/null +++ b/service/mwaa/api.go @@ -0,0 +1,3533 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package mwaa + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +const opCreateCliToken = "CreateCliToken" + +// CreateCliTokenRequest generates a "aws/request.Request" representing the +// client's request for the CreateCliToken operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateCliToken for more information on using the CreateCliToken +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateCliTokenRequest method. +// req, resp := client.CreateCliTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mwaa-2020-07-01/CreateCliToken +func (c *MWAA) CreateCliTokenRequest(input *CreateCliTokenInput) (req *request.Request, output *CreateCliTokenOutput) { + op := &request.Operation{ + Name: opCreateCliToken, + HTTPMethod: "POST", + HTTPPath: "/clitoken/{Name}", + } + + if input == nil { + input = &CreateCliTokenInput{} + } + + output = &CreateCliTokenOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("env.", nil)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// CreateCliToken API operation for AmazonMWAA. +// +// Create a CLI token to use Airflow CLI. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AmazonMWAA's +// API operation CreateCliToken for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// ResourceNotFoundException: The resource is not available. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mwaa-2020-07-01/CreateCliToken +func (c *MWAA) CreateCliToken(input *CreateCliTokenInput) (*CreateCliTokenOutput, error) { + req, out := c.CreateCliTokenRequest(input) + return out, req.Send() +} + +// CreateCliTokenWithContext is the same as CreateCliToken with the addition of +// the ability to pass a context and additional request options. 
+// +// See CreateCliToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MWAA) CreateCliTokenWithContext(ctx aws.Context, input *CreateCliTokenInput, opts ...request.Option) (*CreateCliTokenOutput, error) { + req, out := c.CreateCliTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateEnvironment = "CreateEnvironment" + +// CreateEnvironmentRequest generates a "aws/request.Request" representing the +// client's request for the CreateEnvironment operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateEnvironment for more information on using the CreateEnvironment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateEnvironmentRequest method. 
+// req, resp := client.CreateEnvironmentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mwaa-2020-07-01/CreateEnvironment +func (c *MWAA) CreateEnvironmentRequest(input *CreateEnvironmentInput) (req *request.Request, output *CreateEnvironmentOutput) { + op := &request.Operation{ + Name: opCreateEnvironment, + HTTPMethod: "PUT", + HTTPPath: "/environments/{Name}", + } + + if input == nil { + input = &CreateEnvironmentInput{} + } + + output = &CreateEnvironmentOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("api.", nil)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// CreateEnvironment API operation for AmazonMWAA. +// +// JSON blob that describes the environment to create. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AmazonMWAA's +// API operation CreateEnvironment for usage and error information. +// +// Returned Error Types: +// * ValidationException +// ValidationException: The provided input is not valid. +// +// * InternalServerException +// InternalServerException: An internal error has occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mwaa-2020-07-01/CreateEnvironment +func (c *MWAA) CreateEnvironment(input *CreateEnvironmentInput) (*CreateEnvironmentOutput, error) { + req, out := c.CreateEnvironmentRequest(input) + return out, req.Send() +} + +// CreateEnvironmentWithContext is the same as CreateEnvironment with the addition of +// the ability to pass a context and additional request options. +// +// See CreateEnvironment for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MWAA) CreateEnvironmentWithContext(ctx aws.Context, input *CreateEnvironmentInput, opts ...request.Option) (*CreateEnvironmentOutput, error) { + req, out := c.CreateEnvironmentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateWebLoginToken = "CreateWebLoginToken" + +// CreateWebLoginTokenRequest generates a "aws/request.Request" representing the +// client's request for the CreateWebLoginToken operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateWebLoginToken for more information on using the CreateWebLoginToken +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateWebLoginTokenRequest method. 
+// req, resp := client.CreateWebLoginTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mwaa-2020-07-01/CreateWebLoginToken +func (c *MWAA) CreateWebLoginTokenRequest(input *CreateWebLoginTokenInput) (req *request.Request, output *CreateWebLoginTokenOutput) { + op := &request.Operation{ + Name: opCreateWebLoginToken, + HTTPMethod: "POST", + HTTPPath: "/webtoken/{Name}", + } + + if input == nil { + input = &CreateWebLoginTokenInput{} + } + + output = &CreateWebLoginTokenOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("env.", nil)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// CreateWebLoginToken API operation for AmazonMWAA. +// +// Create a JWT token to be used to login to Airflow Web UI with claims based +// Authentication. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AmazonMWAA's +// API operation CreateWebLoginToken for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// Access to the Airflow Web UI or CLI has been Denied. Please follow the MWAA +// user guide to setup permissions to access the Web UI and CLI functionality. +// +// * ResourceNotFoundException +// ResourceNotFoundException: The resource is not available. +// +// * ValidationException +// ValidationException: The provided input is not valid. +// +// * InternalServerException +// InternalServerException: An internal error has occurred. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mwaa-2020-07-01/CreateWebLoginToken +func (c *MWAA) CreateWebLoginToken(input *CreateWebLoginTokenInput) (*CreateWebLoginTokenOutput, error) { + req, out := c.CreateWebLoginTokenRequest(input) + return out, req.Send() +} + +// CreateWebLoginTokenWithContext is the same as CreateWebLoginToken with the addition of +// the ability to pass a context and additional request options. +// +// See CreateWebLoginToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MWAA) CreateWebLoginTokenWithContext(ctx aws.Context, input *CreateWebLoginTokenInput, opts ...request.Option) (*CreateWebLoginTokenOutput, error) { + req, out := c.CreateWebLoginTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteEnvironment = "DeleteEnvironment" + +// DeleteEnvironmentRequest generates a "aws/request.Request" representing the +// client's request for the DeleteEnvironment operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteEnvironment for more information on using the DeleteEnvironment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteEnvironmentRequest method. 
+// req, resp := client.DeleteEnvironmentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mwaa-2020-07-01/DeleteEnvironment +func (c *MWAA) DeleteEnvironmentRequest(input *DeleteEnvironmentInput) (req *request.Request, output *DeleteEnvironmentOutput) { + op := &request.Operation{ + Name: opDeleteEnvironment, + HTTPMethod: "DELETE", + HTTPPath: "/environments/{Name}", + } + + if input == nil { + input = &DeleteEnvironmentInput{} + } + + output = &DeleteEnvironmentOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("api.", nil)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// DeleteEnvironment API operation for AmazonMWAA. +// +// Delete an existing environment. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AmazonMWAA's +// API operation DeleteEnvironment for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// ResourceNotFoundException: The resource is not available. +// +// * ValidationException +// ValidationException: The provided input is not valid. +// +// * InternalServerException +// InternalServerException: An internal error has occurred. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mwaa-2020-07-01/DeleteEnvironment +func (c *MWAA) DeleteEnvironment(input *DeleteEnvironmentInput) (*DeleteEnvironmentOutput, error) { + req, out := c.DeleteEnvironmentRequest(input) + return out, req.Send() +} + +// DeleteEnvironmentWithContext is the same as DeleteEnvironment with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteEnvironment for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MWAA) DeleteEnvironmentWithContext(ctx aws.Context, input *DeleteEnvironmentInput, opts ...request.Option) (*DeleteEnvironmentOutput, error) { + req, out := c.DeleteEnvironmentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetEnvironment = "GetEnvironment" + +// GetEnvironmentRequest generates a "aws/request.Request" representing the +// client's request for the GetEnvironment operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetEnvironment for more information on using the GetEnvironment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetEnvironmentRequest method. 
+// req, resp := client.GetEnvironmentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mwaa-2020-07-01/GetEnvironment +func (c *MWAA) GetEnvironmentRequest(input *GetEnvironmentInput) (req *request.Request, output *GetEnvironmentOutput) { + op := &request.Operation{ + Name: opGetEnvironment, + HTTPMethod: "GET", + HTTPPath: "/environments/{Name}", + } + + if input == nil { + input = &GetEnvironmentInput{} + } + + output = &GetEnvironmentOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("api.", nil)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// GetEnvironment API operation for AmazonMWAA. +// +// Get details of an existing environment. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AmazonMWAA's +// API operation GetEnvironment for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// ResourceNotFoundException: The resource is not available. +// +// * ValidationException +// ValidationException: The provided input is not valid. +// +// * InternalServerException +// InternalServerException: An internal error has occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mwaa-2020-07-01/GetEnvironment +func (c *MWAA) GetEnvironment(input *GetEnvironmentInput) (*GetEnvironmentOutput, error) { + req, out := c.GetEnvironmentRequest(input) + return out, req.Send() +} + +// GetEnvironmentWithContext is the same as GetEnvironment with the addition of +// the ability to pass a context and additional request options. +// +// See GetEnvironment for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MWAA) GetEnvironmentWithContext(ctx aws.Context, input *GetEnvironmentInput, opts ...request.Option) (*GetEnvironmentOutput, error) { + req, out := c.GetEnvironmentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListEnvironments = "ListEnvironments" + +// ListEnvironmentsRequest generates a "aws/request.Request" representing the +// client's request for the ListEnvironments operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListEnvironments for more information on using the ListEnvironments +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListEnvironmentsRequest method. 
+// req, resp := client.ListEnvironmentsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mwaa-2020-07-01/ListEnvironments +func (c *MWAA) ListEnvironmentsRequest(input *ListEnvironmentsInput) (req *request.Request, output *ListEnvironmentsOutput) { + op := &request.Operation{ + Name: opListEnvironments, + HTTPMethod: "GET", + HTTPPath: "/environments", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListEnvironmentsInput{} + } + + output = &ListEnvironmentsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("api.", nil)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// ListEnvironments API operation for AmazonMWAA. +// +// List Amazon MWAA Environments. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AmazonMWAA's +// API operation ListEnvironments for usage and error information. +// +// Returned Error Types: +// * ValidationException +// ValidationException: The provided input is not valid. +// +// * InternalServerException +// InternalServerException: An internal error has occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mwaa-2020-07-01/ListEnvironments +func (c *MWAA) ListEnvironments(input *ListEnvironmentsInput) (*ListEnvironmentsOutput, error) { + req, out := c.ListEnvironmentsRequest(input) + return out, req.Send() +} + +// ListEnvironmentsWithContext is the same as ListEnvironments with the addition of +// the ability to pass a context and additional request options. 
+// +// See ListEnvironments for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MWAA) ListEnvironmentsWithContext(ctx aws.Context, input *ListEnvironmentsInput, opts ...request.Option) (*ListEnvironmentsOutput, error) { + req, out := c.ListEnvironmentsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListEnvironmentsPages iterates over the pages of a ListEnvironments operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListEnvironments method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListEnvironments operation. +// pageNum := 0 +// err := client.ListEnvironmentsPages(params, +// func(page *mwaa.ListEnvironmentsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *MWAA) ListEnvironmentsPages(input *ListEnvironmentsInput, fn func(*ListEnvironmentsOutput, bool) bool) error { + return c.ListEnvironmentsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListEnvironmentsPagesWithContext same as ListEnvironmentsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *MWAA) ListEnvironmentsPagesWithContext(ctx aws.Context, input *ListEnvironmentsInput, fn func(*ListEnvironmentsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListEnvironmentsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListEnvironmentsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListEnvironmentsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. 
+// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mwaa-2020-07-01/ListTagsForResource +func (c *MWAA) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "GET", + HTTPPath: "/tags/{ResourceArn}", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("api.", nil)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// ListTagsForResource API operation for AmazonMWAA. +// +// List the tags for MWAA environments. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AmazonMWAA's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// ResourceNotFoundException: The resource is not available. +// +// * ValidationException +// ValidationException: The provided input is not valid. +// +// * InternalServerException +// InternalServerException: An internal error has occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mwaa-2020-07-01/ListTagsForResource +func (c *MWAA) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. 
+// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MWAA) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPublishMetrics = "PublishMetrics" + +// PublishMetricsRequest generates a "aws/request.Request" representing the +// client's request for the PublishMetrics operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PublishMetrics for more information on using the PublishMetrics +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PublishMetricsRequest method. 
+// req, resp := client.PublishMetricsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mwaa-2020-07-01/PublishMetrics +func (c *MWAA) PublishMetricsRequest(input *PublishMetricsInput) (req *request.Request, output *PublishMetricsOutput) { + op := &request.Operation{ + Name: opPublishMetrics, + HTTPMethod: "POST", + HTTPPath: "/metrics/environments/{EnvironmentName}", + } + + if input == nil { + input = &PublishMetricsInput{} + } + + output = &PublishMetricsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("ops.", nil)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// PublishMetrics API operation for AmazonMWAA. +// +// An operation for publishing metrics from the customers to the Ops plane. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AmazonMWAA's +// API operation PublishMetrics for usage and error information. +// +// Returned Error Types: +// * ValidationException +// ValidationException: The provided input is not valid. +// +// * InternalServerException +// InternalServerException: An internal error has occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mwaa-2020-07-01/PublishMetrics +func (c *MWAA) PublishMetrics(input *PublishMetricsInput) (*PublishMetricsOutput, error) { + req, out := c.PublishMetricsRequest(input) + return out, req.Send() +} + +// PublishMetricsWithContext is the same as PublishMetrics with the addition of +// the ability to pass a context and additional request options. 
+// +// See PublishMetrics for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MWAA) PublishMetricsWithContext(ctx aws.Context, input *PublishMetricsInput, opts ...request.Option) (*PublishMetricsOutput, error) { + req, out := c.PublishMetricsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. 
+// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mwaa-2020-07-01/TagResource +func (c *MWAA) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/tags/{ResourceArn}", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("api.", nil)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// TagResource API operation for AmazonMWAA. +// +// Add tag to the MWAA environments. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AmazonMWAA's +// API operation TagResource for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// ResourceNotFoundException: The resource is not available. +// +// * ValidationException +// ValidationException: The provided input is not valid. +// +// * InternalServerException +// InternalServerException: An internal error has occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mwaa-2020-07-01/TagResource +func (c *MWAA) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. 
+// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MWAA) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. 
+// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mwaa-2020-07-01/UntagResource +func (c *MWAA) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "DELETE", + HTTPPath: "/tags/{ResourceArn}", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("api.", nil)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// UntagResource API operation for AmazonMWAA. +// +// Remove a tag from the MWAA environments. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AmazonMWAA's +// API operation UntagResource for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// ResourceNotFoundException: The resource is not available. +// +// * ValidationException +// ValidationException: The provided input is not valid. +// +// * InternalServerException +// InternalServerException: An internal error has occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mwaa-2020-07-01/UntagResource +func (c *MWAA) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. 
+// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MWAA) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateEnvironment = "UpdateEnvironment" + +// UpdateEnvironmentRequest generates a "aws/request.Request" representing the +// client's request for the UpdateEnvironment operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateEnvironment for more information on using the UpdateEnvironment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateEnvironmentRequest method. 
+// req, resp := client.UpdateEnvironmentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mwaa-2020-07-01/UpdateEnvironment +func (c *MWAA) UpdateEnvironmentRequest(input *UpdateEnvironmentInput) (req *request.Request, output *UpdateEnvironmentOutput) { + op := &request.Operation{ + Name: opUpdateEnvironment, + HTTPMethod: "PATCH", + HTTPPath: "/environments/{Name}", + } + + if input == nil { + input = &UpdateEnvironmentInput{} + } + + output = &UpdateEnvironmentOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("api.", nil)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// UpdateEnvironment API operation for AmazonMWAA. +// +// Update an MWAA environment. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AmazonMWAA's +// API operation UpdateEnvironment for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// ResourceNotFoundException: The resource is not available. +// +// * ValidationException +// ValidationException: The provided input is not valid. +// +// * InternalServerException +// InternalServerException: An internal error has occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mwaa-2020-07-01/UpdateEnvironment +func (c *MWAA) UpdateEnvironment(input *UpdateEnvironmentInput) (*UpdateEnvironmentOutput, error) { + req, out := c.UpdateEnvironmentRequest(input) + return out, req.Send() +} + +// UpdateEnvironmentWithContext is the same as UpdateEnvironment with the addition of +// the ability to pass a context and additional request options. 
+//
+// See UpdateEnvironment for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *MWAA) UpdateEnvironmentWithContext(ctx aws.Context, input *UpdateEnvironmentInput, opts ...request.Option) (*UpdateEnvironmentOutput, error) {
+	req, out := c.UpdateEnvironmentRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+// Access to the Airflow Web UI or CLI has been denied. Please follow the MWAA
+// user guide to set up permissions to access the Web UI and CLI functionality.
+type AccessDeniedException struct {
+	_ struct{} `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"Message" type:"string"`
+}
+
+// String returns the string representation
+func (s AccessDeniedException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AccessDeniedException) GoString() string {
+	return s.String()
+}
+
+func newErrorAccessDeniedException(v protocol.ResponseMetadata) error {
+	return &AccessDeniedException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *AccessDeniedException) Code() string {
+	return "AccessDeniedException"
+}
+
+// Message returns the exception's message.
+func (s *AccessDeniedException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *AccessDeniedException) OrigErr() error {
+	return nil
+}
+
+func (s *AccessDeniedException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID +} + +type CreateCliTokenInput struct { + _ struct{} `type:"structure"` + + // Create a CLI token request for a MWAA environment. + // + // Name is a required field + Name *string `location:"uri" locationName:"Name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateCliTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCliTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateCliTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCliTokenInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *CreateCliTokenInput) SetName(v string) *CreateCliTokenInput { + s.Name = &v + return s +} + +type CreateCliTokenOutput struct { + _ struct{} `type:"structure"` + + // Create an Airflow CLI login token response for the provided JWT token. + CliToken *string `type:"string" sensitive:"true"` + + // Create an Airflow CLI login token response for the provided webserver hostname. 
+ WebServerHostname *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateCliTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCliTokenOutput) GoString() string { + return s.String() +} + +// SetCliToken sets the CliToken field's value. +func (s *CreateCliTokenOutput) SetCliToken(v string) *CreateCliTokenOutput { + s.CliToken = &v + return s +} + +// SetWebServerHostname sets the WebServerHostname field's value. +func (s *CreateCliTokenOutput) SetWebServerHostname(v string) *CreateCliTokenOutput { + s.WebServerHostname = &v + return s +} + +// This section contains the Amazon Managed Workflows for Apache Airflow (MWAA) +// API reference documentation to create an environment. For more information, +// see Get started with Amazon Managed Workflows for Apache Airflow (https://docs.aws.amazon.com/mwaa/latest/userguide/get-started.html). +type CreateEnvironmentInput struct { + _ struct{} `type:"structure"` + + // The Apache Airflow configuration setting you want to override in your environment. + // For more information, see Environment configuration (https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-env-variables.html). + AirflowConfigurationOptions map[string]*string `type:"map" sensitive:"true"` + + // The Apache Airflow version you want to use for your environment. + AirflowVersion *string `min:"1" type:"string"` + + // The relative path to the DAG folder on your Amazon S3 storage bucket. For + // example, dags. For more information, see Importing DAGs on Amazon MWAA (https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html). + // + // DagS3Path is a required field + DagS3Path *string `min:"1" type:"string" required:"true"` + + // The environment class you want to use for your environment. The environment + // class determines the size of the containers and database used for your Apache + // Airflow services. 
+ EnvironmentClass *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the execution role for your environment. + // An execution role is an AWS Identity and Access Management (IAM) role that + // grants MWAA permission to access AWS services and resources used by your + // environment. For example, arn:aws:iam::123456789:role/my-execution-role. + // For more information, see Managing access to Amazon Managed Workflows for + // Apache Airflow (https://docs.aws.amazon.com/mwaa/latest/userguide/manage-access.html). + // + // ExecutionRoleArn is a required field + ExecutionRoleArn *string `min:"1" type:"string" required:"true"` + + // The AWS Key Management Service (KMS) key to encrypt and decrypt the data + // in your environment. You can use an AWS KMS key managed by MWAA, or a custom + // KMS key (advanced). For more information, see Customer master keys (CMKs) + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html?icmpid=docs_console_unmapped#master_keys) + // in the AWS KMS developer guide. + KmsKey *string `min:"1" type:"string"` + + // The Apache Airflow logs you want to send to Amazon CloudWatch Logs. + LoggingConfiguration *LoggingConfigurationInput `type:"structure"` + + // The maximum number of workers that you want to run in your environment. MWAA + // scales the number of Apache Airflow workers and the Fargate containers that + // run your tasks up to the number you specify in this field. When there are + // no more tasks running, and no more in the queue, MWAA disposes of the extra + // containers leaving the one worker that is included with your environment. + MaxWorkers *int64 `min:"1" type:"integer"` + + // The name of your MWAA environment. + // + // Name is a required field + Name *string `location:"uri" locationName:"Name" min:"1" type:"string" required:"true"` + + // The VPC networking components you want to use for your environment. 
At least + // two private subnet identifiers and one VPC security group identifier are + // required to create an environment. For more information, see Creating the + // VPC network for a MWAA environment (https://docs.aws.amazon.com/mwaa/latest/userguide/vpc-mwaa.html). + // + // NetworkConfiguration is a required field + NetworkConfiguration *NetworkConfiguration `type:"structure" required:"true"` + + // The plugins.zip file version you want to use. + PluginsS3ObjectVersion *string `min:"1" type:"string"` + + // The relative path to the plugins.zip file on your Amazon S3 storage bucket. + // For example, plugins.zip. If a relative path is provided in the request, + // then PluginsS3ObjectVersion is required. For more information, see Importing + // DAGs on Amazon MWAA (https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html). + PluginsS3Path *string `min:"1" type:"string"` + + // The requirements.txt file version you want to use. + RequirementsS3ObjectVersion *string `min:"1" type:"string"` + + // The relative path to the requirements.txt file on your Amazon S3 storage + // bucket. For example, requirements.txt. If a relative path is provided in + // the request, then RequirementsS3ObjectVersion is required. For more information, + // see Importing DAGs on Amazon MWAA (https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html). + RequirementsS3Path *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of your Amazon S3 storage bucket. For example, + // arn:aws:s3:::airflow-mybucketname. + // + // SourceBucketArn is a required field + SourceBucketArn *string `min:"1" type:"string" required:"true"` + + // The metadata tags you want to attach to your environment. For more information, + // see Tagging AWS resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). + Tags map[string]*string `min:"1" type:"map"` + + // The networking access of your Apache Airflow web server. 
A public network + // allows your Airflow UI to be accessed over the Internet by users granted + // access in your IAM policy. A private network limits access of your Airflow + // UI to users within your VPC. For more information, see Creating the VPC network + // for a MWAA environment (https://docs.aws.amazon.com/mwaa/latest/userguide/vpc-mwaa.html). + WebserverAccessMode *string `type:"string" enum:"WebserverAccessMode"` + + // The day and time you want MWAA to start weekly maintenance updates on your + // environment. + WeeklyMaintenanceWindowStart *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateEnvironmentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateEnvironmentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateEnvironmentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateEnvironmentInput"} + if s.AirflowVersion != nil && len(*s.AirflowVersion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AirflowVersion", 1)) + } + if s.DagS3Path == nil { + invalidParams.Add(request.NewErrParamRequired("DagS3Path")) + } + if s.DagS3Path != nil && len(*s.DagS3Path) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DagS3Path", 1)) + } + if s.EnvironmentClass != nil && len(*s.EnvironmentClass) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EnvironmentClass", 1)) + } + if s.ExecutionRoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("ExecutionRoleArn")) + } + if s.ExecutionRoleArn != nil && len(*s.ExecutionRoleArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ExecutionRoleArn", 1)) + } + if s.KmsKey != nil && len(*s.KmsKey) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KmsKey", 1)) + } + if s.MaxWorkers != nil && *s.MaxWorkers < 1 { + 
invalidParams.Add(request.NewErrParamMinValue("MaxWorkers", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.NetworkConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkConfiguration")) + } + if s.PluginsS3ObjectVersion != nil && len(*s.PluginsS3ObjectVersion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PluginsS3ObjectVersion", 1)) + } + if s.PluginsS3Path != nil && len(*s.PluginsS3Path) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PluginsS3Path", 1)) + } + if s.RequirementsS3ObjectVersion != nil && len(*s.RequirementsS3ObjectVersion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RequirementsS3ObjectVersion", 1)) + } + if s.RequirementsS3Path != nil && len(*s.RequirementsS3Path) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RequirementsS3Path", 1)) + } + if s.SourceBucketArn == nil { + invalidParams.Add(request.NewErrParamRequired("SourceBucketArn")) + } + if s.SourceBucketArn != nil && len(*s.SourceBucketArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SourceBucketArn", 1)) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.WeeklyMaintenanceWindowStart != nil && len(*s.WeeklyMaintenanceWindowStart) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WeeklyMaintenanceWindowStart", 1)) + } + if s.LoggingConfiguration != nil { + if err := s.LoggingConfiguration.Validate(); err != nil { + invalidParams.AddNested("LoggingConfiguration", err.(request.ErrInvalidParams)) + } + } + if s.NetworkConfiguration != nil { + if err := s.NetworkConfiguration.Validate(); err != nil { + invalidParams.AddNested("NetworkConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAirflowConfigurationOptions sets the 
AirflowConfigurationOptions field's value. +func (s *CreateEnvironmentInput) SetAirflowConfigurationOptions(v map[string]*string) *CreateEnvironmentInput { + s.AirflowConfigurationOptions = v + return s +} + +// SetAirflowVersion sets the AirflowVersion field's value. +func (s *CreateEnvironmentInput) SetAirflowVersion(v string) *CreateEnvironmentInput { + s.AirflowVersion = &v + return s +} + +// SetDagS3Path sets the DagS3Path field's value. +func (s *CreateEnvironmentInput) SetDagS3Path(v string) *CreateEnvironmentInput { + s.DagS3Path = &v + return s +} + +// SetEnvironmentClass sets the EnvironmentClass field's value. +func (s *CreateEnvironmentInput) SetEnvironmentClass(v string) *CreateEnvironmentInput { + s.EnvironmentClass = &v + return s +} + +// SetExecutionRoleArn sets the ExecutionRoleArn field's value. +func (s *CreateEnvironmentInput) SetExecutionRoleArn(v string) *CreateEnvironmentInput { + s.ExecutionRoleArn = &v + return s +} + +// SetKmsKey sets the KmsKey field's value. +func (s *CreateEnvironmentInput) SetKmsKey(v string) *CreateEnvironmentInput { + s.KmsKey = &v + return s +} + +// SetLoggingConfiguration sets the LoggingConfiguration field's value. +func (s *CreateEnvironmentInput) SetLoggingConfiguration(v *LoggingConfigurationInput) *CreateEnvironmentInput { + s.LoggingConfiguration = v + return s +} + +// SetMaxWorkers sets the MaxWorkers field's value. +func (s *CreateEnvironmentInput) SetMaxWorkers(v int64) *CreateEnvironmentInput { + s.MaxWorkers = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateEnvironmentInput) SetName(v string) *CreateEnvironmentInput { + s.Name = &v + return s +} + +// SetNetworkConfiguration sets the NetworkConfiguration field's value. +func (s *CreateEnvironmentInput) SetNetworkConfiguration(v *NetworkConfiguration) *CreateEnvironmentInput { + s.NetworkConfiguration = v + return s +} + +// SetPluginsS3ObjectVersion sets the PluginsS3ObjectVersion field's value. 
+func (s *CreateEnvironmentInput) SetPluginsS3ObjectVersion(v string) *CreateEnvironmentInput {
+	s.PluginsS3ObjectVersion = &v
+	return s
+}
+
+// SetPluginsS3Path sets the PluginsS3Path field's value.
+func (s *CreateEnvironmentInput) SetPluginsS3Path(v string) *CreateEnvironmentInput {
+	s.PluginsS3Path = &v
+	return s
+}
+
+// SetRequirementsS3ObjectVersion sets the RequirementsS3ObjectVersion field's value.
+func (s *CreateEnvironmentInput) SetRequirementsS3ObjectVersion(v string) *CreateEnvironmentInput {
+	s.RequirementsS3ObjectVersion = &v
+	return s
+}
+
+// SetRequirementsS3Path sets the RequirementsS3Path field's value.
+func (s *CreateEnvironmentInput) SetRequirementsS3Path(v string) *CreateEnvironmentInput {
+	s.RequirementsS3Path = &v
+	return s
+}
+
+// SetSourceBucketArn sets the SourceBucketArn field's value.
+func (s *CreateEnvironmentInput) SetSourceBucketArn(v string) *CreateEnvironmentInput {
+	s.SourceBucketArn = &v
+	return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *CreateEnvironmentInput) SetTags(v map[string]*string) *CreateEnvironmentInput {
+	s.Tags = v
+	return s
+}
+
+// SetWebserverAccessMode sets the WebserverAccessMode field's value.
+func (s *CreateEnvironmentInput) SetWebserverAccessMode(v string) *CreateEnvironmentInput {
+	s.WebserverAccessMode = &v
+	return s
+}
+
+// SetWeeklyMaintenanceWindowStart sets the WeeklyMaintenanceWindowStart field's value.
+func (s *CreateEnvironmentInput) SetWeeklyMaintenanceWindowStart(v string) *CreateEnvironmentInput {
+	s.WeeklyMaintenanceWindowStart = &v
+	return s
+}
+
+type CreateEnvironmentOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The resulting Amazon MWAA environment ARN.
+ Arn *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateEnvironmentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateEnvironmentOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *CreateEnvironmentOutput) SetArn(v string) *CreateEnvironmentOutput { + s.Arn = &v + return s +} + +type CreateWebLoginTokenInput struct { + _ struct{} `type:"structure"` + + // Create an Airflow Web UI login token request for a MWAA environment. + // + // Name is a required field + Name *string `location:"uri" locationName:"Name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateWebLoginTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateWebLoginTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateWebLoginTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateWebLoginTokenInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *CreateWebLoginTokenInput) SetName(v string) *CreateWebLoginTokenInput { + s.Name = &v + return s +} + +type CreateWebLoginTokenOutput struct { + _ struct{} `type:"structure"` + + // Create an Airflow Web UI login token response for the provided webserver + // hostname. + WebServerHostname *string `min:"1" type:"string"` + + // Create an Airflow Web UI login token response for the provided JWT token. 
+ WebToken *string `type:"string" sensitive:"true"` +} + +// String returns the string representation +func (s CreateWebLoginTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateWebLoginTokenOutput) GoString() string { + return s.String() +} + +// SetWebServerHostname sets the WebServerHostname field's value. +func (s *CreateWebLoginTokenOutput) SetWebServerHostname(v string) *CreateWebLoginTokenOutput { + s.WebServerHostname = &v + return s +} + +// SetWebToken sets the WebToken field's value. +func (s *CreateWebLoginTokenOutput) SetWebToken(v string) *CreateWebLoginTokenOutput { + s.WebToken = &v + return s +} + +type DeleteEnvironmentInput struct { + _ struct{} `type:"structure"` + + // The name of the environment to delete. + // + // Name is a required field + Name *string `location:"uri" locationName:"Name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteEnvironmentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEnvironmentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteEnvironmentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteEnvironmentInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. 
+func (s *DeleteEnvironmentInput) SetName(v string) *DeleteEnvironmentInput {
+	s.Name = &v
+	return s
+}
+
+type DeleteEnvironmentOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteEnvironmentOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteEnvironmentOutput) GoString() string {
+	return s.String()
+}
+
+// Internal only API.
+type Dimension struct {
+	_ struct{} `type:"structure"`
+
+	// Internal only API.
+	//
+	// Name is a required field
+	Name *string `type:"string" required:"true"`
+
+	// Internal only API.
+	//
+	// Value is a required field
+	Value *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Dimension) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Dimension) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Dimension) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "Dimension"}
+	if s.Name == nil {
+		invalidParams.Add(request.NewErrParamRequired("Name"))
+	}
+	if s.Value == nil {
+		invalidParams.Add(request.NewErrParamRequired("Value"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetName sets the Name field's value.
+func (s *Dimension) SetName(v string) *Dimension {
+	s.Name = &v
+	return s
+}
+
+// SetValue sets the Value field's value.
+func (s *Dimension) SetValue(v string) *Dimension {
+	s.Value = &v
+	return s
+}
+
+// An Amazon MWAA environment.
+type Environment struct {
+	_ struct{} `type:"structure"`
+
+	// The Airflow Configuration Options of the Amazon MWAA Environment.
+	AirflowConfigurationOptions map[string]*string `type:"map"`
+
+	// The Airflow version of the Amazon MWAA Environment.
+ AirflowVersion *string `min:"1" type:"string"` + + // The ARN of the Amazon MWAA Environment. + Arn *string `min:"1" type:"string"` + + // The Created At date of the Amazon MWAA Environment. + CreatedAt *time.Time `type:"timestamp"` + + // The Dags S3 Path of the Amazon MWAA Environment. + DagS3Path *string `min:"1" type:"string"` + + // The Environment Class (size) of the Amazon MWAA Environment. + EnvironmentClass *string `min:"1" type:"string"` + + // The Execution Role ARN of the Amazon MWAA Environment. + ExecutionRoleArn *string `min:"1" type:"string"` + + // The Kms Key of the Amazon MWAA Environment. + KmsKey *string `min:"1" type:"string"` + + // Last update information for the environment. + LastUpdate *LastUpdate `type:"structure"` + + // The Logging Configuration of the Amazon MWAA Environment. + LoggingConfiguration *LoggingConfiguration `type:"structure"` + + // The Maximum Workers of the Amazon MWAA Environment. + MaxWorkers *int64 `min:"1" type:"integer"` + + // The name of the Amazon MWAA Environment. + Name *string `min:"1" type:"string"` + + // Provide the security group and subnet IDs for the workers and scheduler. + NetworkConfiguration *NetworkConfiguration `type:"structure"` + + // The Plugins.zip S3 Object Version of the Amazon MWAA Environment. + PluginsS3ObjectVersion *string `min:"1" type:"string"` + + // The Plugins.zip S3 Path of the Amazon MWAA Environment. + PluginsS3Path *string `min:"1" type:"string"` + + // The Requirements.txt file S3 Object Version of the Amazon MWAA Environment. + RequirementsS3ObjectVersion *string `min:"1" type:"string"` + + // The Requirement.txt S3 Path of the Amazon MWAA Environment. + RequirementsS3Path *string `min:"1" type:"string"` + + // The Service Role ARN of the Amazon MWAA Environment. + ServiceRoleArn *string `min:"1" type:"string"` + + // The Source S3 Bucket ARN of the Amazon MWAA Environment. + SourceBucketArn *string `min:"1" type:"string"` + + // The status of the Amazon MWAA Environment. 
+ Status *string `type:"string" enum:"EnvironmentStatus"` + + // The Tags of the Amazon MWAA Environment. + Tags map[string]*string `min:"1" type:"map"` + + // The Webserver Access Mode of the Amazon MWAA Environment (public or private + // only). + WebserverAccessMode *string `type:"string" enum:"WebserverAccessMode"` + + // The Webserver URL of the Amazon MWAA Environment. + WebserverUrl *string `min:"1" type:"string"` + + // The Weekly Maintenance Window Start of the Amazon MWAA Environment. + WeeklyMaintenanceWindowStart *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s Environment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Environment) GoString() string { + return s.String() +} + +// SetAirflowConfigurationOptions sets the AirflowConfigurationOptions field's value. +func (s *Environment) SetAirflowConfigurationOptions(v map[string]*string) *Environment { + s.AirflowConfigurationOptions = v + return s +} + +// SetAirflowVersion sets the AirflowVersion field's value. +func (s *Environment) SetAirflowVersion(v string) *Environment { + s.AirflowVersion = &v + return s +} + +// SetArn sets the Arn field's value. +func (s *Environment) SetArn(v string) *Environment { + s.Arn = &v + return s +} + +// SetCreatedAt sets the CreatedAt field's value. +func (s *Environment) SetCreatedAt(v time.Time) *Environment { + s.CreatedAt = &v + return s +} + +// SetDagS3Path sets the DagS3Path field's value. +func (s *Environment) SetDagS3Path(v string) *Environment { + s.DagS3Path = &v + return s +} + +// SetEnvironmentClass sets the EnvironmentClass field's value. +func (s *Environment) SetEnvironmentClass(v string) *Environment { + s.EnvironmentClass = &v + return s +} + +// SetExecutionRoleArn sets the ExecutionRoleArn field's value. 
+func (s *Environment) SetExecutionRoleArn(v string) *Environment { + s.ExecutionRoleArn = &v + return s +} + +// SetKmsKey sets the KmsKey field's value. +func (s *Environment) SetKmsKey(v string) *Environment { + s.KmsKey = &v + return s +} + +// SetLastUpdate sets the LastUpdate field's value. +func (s *Environment) SetLastUpdate(v *LastUpdate) *Environment { + s.LastUpdate = v + return s +} + +// SetLoggingConfiguration sets the LoggingConfiguration field's value. +func (s *Environment) SetLoggingConfiguration(v *LoggingConfiguration) *Environment { + s.LoggingConfiguration = v + return s +} + +// SetMaxWorkers sets the MaxWorkers field's value. +func (s *Environment) SetMaxWorkers(v int64) *Environment { + s.MaxWorkers = &v + return s +} + +// SetName sets the Name field's value. +func (s *Environment) SetName(v string) *Environment { + s.Name = &v + return s +} + +// SetNetworkConfiguration sets the NetworkConfiguration field's value. +func (s *Environment) SetNetworkConfiguration(v *NetworkConfiguration) *Environment { + s.NetworkConfiguration = v + return s +} + +// SetPluginsS3ObjectVersion sets the PluginsS3ObjectVersion field's value. +func (s *Environment) SetPluginsS3ObjectVersion(v string) *Environment { + s.PluginsS3ObjectVersion = &v + return s +} + +// SetPluginsS3Path sets the PluginsS3Path field's value. +func (s *Environment) SetPluginsS3Path(v string) *Environment { + s.PluginsS3Path = &v + return s +} + +// SetRequirementsS3ObjectVersion sets the RequirementsS3ObjectVersion field's value. +func (s *Environment) SetRequirementsS3ObjectVersion(v string) *Environment { + s.RequirementsS3ObjectVersion = &v + return s +} + +// SetRequirementsS3Path sets the RequirementsS3Path field's value. +func (s *Environment) SetRequirementsS3Path(v string) *Environment { + s.RequirementsS3Path = &v + return s +} + +// SetServiceRoleArn sets the ServiceRoleArn field's value. 
+func (s *Environment) SetServiceRoleArn(v string) *Environment { + s.ServiceRoleArn = &v + return s +} + +// SetSourceBucketArn sets the SourceBucketArn field's value. +func (s *Environment) SetSourceBucketArn(v string) *Environment { + s.SourceBucketArn = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *Environment) SetStatus(v string) *Environment { + s.Status = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *Environment) SetTags(v map[string]*string) *Environment { + s.Tags = v + return s +} + +// SetWebserverAccessMode sets the WebserverAccessMode field's value. +func (s *Environment) SetWebserverAccessMode(v string) *Environment { + s.WebserverAccessMode = &v + return s +} + +// SetWebserverUrl sets the WebserverUrl field's value. +func (s *Environment) SetWebserverUrl(v string) *Environment { + s.WebserverUrl = &v + return s +} + +// SetWeeklyMaintenanceWindowStart sets the WeeklyMaintenanceWindowStart field's value. +func (s *Environment) SetWeeklyMaintenanceWindowStart(v string) *Environment { + s.WeeklyMaintenanceWindowStart = &v + return s +} + +type GetEnvironmentInput struct { + _ struct{} `type:"structure"` + + // The name of the environment to retrieve. + // + // Name is a required field + Name *string `location:"uri" locationName:"Name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetEnvironmentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetEnvironmentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetEnvironmentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetEnvironmentInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *GetEnvironmentInput) SetName(v string) *GetEnvironmentInput { + s.Name = &v + return s +} + +type GetEnvironmentOutput struct { + _ struct{} `type:"structure"` + + // A JSON blob with environment details. + Environment *Environment `type:"structure"` +} + +// String returns the string representation +func (s GetEnvironmentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetEnvironmentOutput) GoString() string { + return s.String() +} + +// SetEnvironment sets the Environment field's value. +func (s *GetEnvironmentOutput) SetEnvironment(v *Environment) *GetEnvironmentOutput { + s.Environment = v + return s +} + +// InternalServerException: An internal error has occurred. +type InternalServerException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s InternalServerException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InternalServerException) GoString() string { + return s.String() +} + +func newErrorInternalServerException(v protocol.ResponseMetadata) error { + return &InternalServerException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InternalServerException) Code() string { + return "InternalServerException" +} + +// Message returns the exception's message. 
+func (s *InternalServerException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InternalServerException) OrigErr() error { + return nil +} + +func (s *InternalServerException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InternalServerException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InternalServerException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Last update information for the environment. +type LastUpdate struct { + _ struct{} `type:"structure"` + + // Time that last update occurred. + CreatedAt *time.Time `type:"timestamp"` + + // Error string of last update, if applicable. + Error *UpdateError `type:"structure"` + + // Status of last update of SUCCESS, FAILED, CREATING, DELETING. + Status *string `type:"string" enum:"UpdateStatus"` +} + +// String returns the string representation +func (s LastUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LastUpdate) GoString() string { + return s.String() +} + +// SetCreatedAt sets the CreatedAt field's value. +func (s *LastUpdate) SetCreatedAt(v time.Time) *LastUpdate { + s.CreatedAt = &v + return s +} + +// SetError sets the Error field's value. +func (s *LastUpdate) SetError(v *UpdateError) *LastUpdate { + s.Error = v + return s +} + +// SetStatus sets the Status field's value. +func (s *LastUpdate) SetStatus(v string) *LastUpdate { + s.Status = &v + return s +} + +type ListEnvironmentsInput struct { + _ struct{} `type:"structure"` + + // The maximum results when listing MWAA environments. 
+ MaxResults *int64 `location:"querystring" locationName:"MaxResults" min:"1" type:"integer"` + + // The Next Token when listing MWAA environments. + NextToken *string `location:"querystring" locationName:"NextToken" type:"string"` +} + +// String returns the string representation +func (s ListEnvironmentsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListEnvironmentsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListEnvironmentsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListEnvironmentsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListEnvironmentsInput) SetMaxResults(v int64) *ListEnvironmentsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListEnvironmentsInput) SetNextToken(v string) *ListEnvironmentsInput { + s.NextToken = &v + return s +} + +type ListEnvironmentsOutput struct { + _ struct{} `type:"structure"` + + // The list of Amazon MWAA Environments. + // + // Environments is a required field + Environments []*string `type:"list" required:"true"` + + // The Next Token when listing MWAA environments. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListEnvironmentsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListEnvironmentsOutput) GoString() string { + return s.String() +} + +// SetEnvironments sets the Environments field's value. 
+func (s *ListEnvironmentsOutput) SetEnvironments(v []*string) *ListEnvironmentsOutput { + s.Environments = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListEnvironmentsOutput) SetNextToken(v string) *ListEnvironmentsOutput { + s.NextToken = &v + return s +} + +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN of the MWAA environment. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"ResourceArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { + s.ResourceArn = &v + return s +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // The tags of the MWAA environments. + Tags map[string]*string `min:"1" type:"map"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. 
+func (s *ListTagsForResourceOutput) SetTags(v map[string]*string) *ListTagsForResourceOutput { + s.Tags = v + return s +} + +// The Logging Configuration of your Amazon MWAA environment. +type LoggingConfiguration struct { + _ struct{} `type:"structure"` + + // A JSON blob that provides configuration to use for logging with respect to + // the various Apache Airflow services: DagProcessingLogs, SchedulerLogs, TaskLogs, + // WebserverLogs, and WorkerLogs. + DagProcessingLogs *ModuleLoggingConfiguration `type:"structure"` + + // A JSON blob that provides configuration to use for logging with respect to + // the various Apache Airflow services: DagProcessingLogs, SchedulerLogs, TaskLogs, + // WebserverLogs, and WorkerLogs. + SchedulerLogs *ModuleLoggingConfiguration `type:"structure"` + + // A JSON blob that provides configuration to use for logging with respect to + // the various Apache Airflow services: DagProcessingLogs, SchedulerLogs, TaskLogs, + // WebserverLogs, and WorkerLogs. + TaskLogs *ModuleLoggingConfiguration `type:"structure"` + + // A JSON blob that provides configuration to use for logging with respect to + // the various Apache Airflow services: DagProcessingLogs, SchedulerLogs, TaskLogs, + // WebserverLogs, and WorkerLogs. + WebserverLogs *ModuleLoggingConfiguration `type:"structure"` + + // A JSON blob that provides configuration to use for logging with respect to + // the various Apache Airflow services: DagProcessingLogs, SchedulerLogs, TaskLogs, + // WebserverLogs, and WorkerLogs. + WorkerLogs *ModuleLoggingConfiguration `type:"structure"` +} + +// String returns the string representation +func (s LoggingConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoggingConfiguration) GoString() string { + return s.String() +} + +// SetDagProcessingLogs sets the DagProcessingLogs field's value. 
+func (s *LoggingConfiguration) SetDagProcessingLogs(v *ModuleLoggingConfiguration) *LoggingConfiguration { + s.DagProcessingLogs = v + return s +} + +// SetSchedulerLogs sets the SchedulerLogs field's value. +func (s *LoggingConfiguration) SetSchedulerLogs(v *ModuleLoggingConfiguration) *LoggingConfiguration { + s.SchedulerLogs = v + return s +} + +// SetTaskLogs sets the TaskLogs field's value. +func (s *LoggingConfiguration) SetTaskLogs(v *ModuleLoggingConfiguration) *LoggingConfiguration { + s.TaskLogs = v + return s +} + +// SetWebserverLogs sets the WebserverLogs field's value. +func (s *LoggingConfiguration) SetWebserverLogs(v *ModuleLoggingConfiguration) *LoggingConfiguration { + s.WebserverLogs = v + return s +} + +// SetWorkerLogs sets the WorkerLogs field's value. +func (s *LoggingConfiguration) SetWorkerLogs(v *ModuleLoggingConfiguration) *LoggingConfiguration { + s.WorkerLogs = v + return s +} + +// The Logging Configuration of your Amazon MWAA environment. +type LoggingConfigurationInput struct { + _ struct{} `type:"structure"` + + // A JSON blob that provides configuration to use for logging with respect to + // the various Apache Airflow services: DagProcessingLogs, SchedulerLogs, TaskLogs, + // WebserverLogs, and WorkerLogs. + DagProcessingLogs *ModuleLoggingConfigurationInput `type:"structure"` + + // A JSON blob that provides configuration to use for logging with respect to + // the various Apache Airflow services: DagProcessingLogs, SchedulerLogs, TaskLogs, + // WebserverLogs, and WorkerLogs. + SchedulerLogs *ModuleLoggingConfigurationInput `type:"structure"` + + // A JSON blob that provides configuration to use for logging with respect to + // the various Apache Airflow services: DagProcessingLogs, SchedulerLogs, TaskLogs, + // WebserverLogs, and WorkerLogs. 
+ TaskLogs *ModuleLoggingConfigurationInput `type:"structure"` + + // A JSON blob that provides configuration to use for logging with respect to + // the various Apache Airflow services: DagProcessingLogs, SchedulerLogs, TaskLogs, + // WebserverLogs, and WorkerLogs. + WebserverLogs *ModuleLoggingConfigurationInput `type:"structure"` + + // A JSON blob that provides configuration to use for logging with respect to + // the various Apache Airflow services: DagProcessingLogs, SchedulerLogs, TaskLogs, + // WebserverLogs, and WorkerLogs. + WorkerLogs *ModuleLoggingConfigurationInput `type:"structure"` +} + +// String returns the string representation +func (s LoggingConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoggingConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LoggingConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LoggingConfigurationInput"} + if s.DagProcessingLogs != nil { + if err := s.DagProcessingLogs.Validate(); err != nil { + invalidParams.AddNested("DagProcessingLogs", err.(request.ErrInvalidParams)) + } + } + if s.SchedulerLogs != nil { + if err := s.SchedulerLogs.Validate(); err != nil { + invalidParams.AddNested("SchedulerLogs", err.(request.ErrInvalidParams)) + } + } + if s.TaskLogs != nil { + if err := s.TaskLogs.Validate(); err != nil { + invalidParams.AddNested("TaskLogs", err.(request.ErrInvalidParams)) + } + } + if s.WebserverLogs != nil { + if err := s.WebserverLogs.Validate(); err != nil { + invalidParams.AddNested("WebserverLogs", err.(request.ErrInvalidParams)) + } + } + if s.WorkerLogs != nil { + if err := s.WorkerLogs.Validate(); err != nil { + invalidParams.AddNested("WorkerLogs", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// 
SetDagProcessingLogs sets the DagProcessingLogs field's value. +func (s *LoggingConfigurationInput) SetDagProcessingLogs(v *ModuleLoggingConfigurationInput) *LoggingConfigurationInput { + s.DagProcessingLogs = v + return s +} + +// SetSchedulerLogs sets the SchedulerLogs field's value. +func (s *LoggingConfigurationInput) SetSchedulerLogs(v *ModuleLoggingConfigurationInput) *LoggingConfigurationInput { + s.SchedulerLogs = v + return s +} + +// SetTaskLogs sets the TaskLogs field's value. +func (s *LoggingConfigurationInput) SetTaskLogs(v *ModuleLoggingConfigurationInput) *LoggingConfigurationInput { + s.TaskLogs = v + return s +} + +// SetWebserverLogs sets the WebserverLogs field's value. +func (s *LoggingConfigurationInput) SetWebserverLogs(v *ModuleLoggingConfigurationInput) *LoggingConfigurationInput { + s.WebserverLogs = v + return s +} + +// SetWorkerLogs sets the WorkerLogs field's value. +func (s *LoggingConfigurationInput) SetWorkerLogs(v *ModuleLoggingConfigurationInput) *LoggingConfigurationInput { + s.WorkerLogs = v + return s +} + +// Internal only API. +type MetricDatum struct { + _ struct{} `type:"structure"` + + // Internal only API. + Dimensions []*Dimension `type:"list"` + + // Internal only API. + // + // MetricName is a required field + MetricName *string `type:"string" required:"true"` + + // Internal only API. + StatisticValues *StatisticSet `type:"structure"` + + // Internal only API. + // + // Timestamp is a required field + Timestamp *time.Time `type:"timestamp" required:"true"` + + // Unit + Unit *string `type:"string" enum:"Unit"` + + // Internal only API. + Value *float64 `type:"double"` +} + +// String returns the string representation +func (s MetricDatum) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricDatum) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *MetricDatum) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricDatum"} + if s.MetricName == nil { + invalidParams.Add(request.NewErrParamRequired("MetricName")) + } + if s.Timestamp == nil { + invalidParams.Add(request.NewErrParamRequired("Timestamp")) + } + if s.Dimensions != nil { + for i, v := range s.Dimensions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Dimensions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDimensions sets the Dimensions field's value. +func (s *MetricDatum) SetDimensions(v []*Dimension) *MetricDatum { + s.Dimensions = v + return s +} + +// SetMetricName sets the MetricName field's value. +func (s *MetricDatum) SetMetricName(v string) *MetricDatum { + s.MetricName = &v + return s +} + +// SetStatisticValues sets the StatisticValues field's value. +func (s *MetricDatum) SetStatisticValues(v *StatisticSet) *MetricDatum { + s.StatisticValues = v + return s +} + +// SetTimestamp sets the Timestamp field's value. +func (s *MetricDatum) SetTimestamp(v time.Time) *MetricDatum { + s.Timestamp = &v + return s +} + +// SetUnit sets the Unit field's value. +func (s *MetricDatum) SetUnit(v string) *MetricDatum { + s.Unit = &v + return s +} + +// SetValue sets the Value field's value. +func (s *MetricDatum) SetValue(v float64) *MetricDatum { + s.Value = &v + return s +} + +// A JSON blob that provides configuration to use for logging with respect to +// the various Apache Airflow services: DagProcessingLogs, SchedulerLogs, TaskLogs, +// WebserverLogs, and WorkerLogs. +type ModuleLoggingConfiguration struct { + _ struct{} `type:"structure"` + + // Provides the ARN for the CloudWatch group where the logs will be published. + CloudWatchLogGroupArn *string `min:"1" type:"string"` + + // Defines that the logging module is enabled. 
+ Enabled *bool `type:"boolean"` + + // Defines the log level, which can be CRITICAL, ERROR, WARNING, or INFO. + LogLevel *string `type:"string" enum:"LoggingLevel"` +} + +// String returns the string representation +func (s ModuleLoggingConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModuleLoggingConfiguration) GoString() string { + return s.String() +} + +// SetCloudWatchLogGroupArn sets the CloudWatchLogGroupArn field's value. +func (s *ModuleLoggingConfiguration) SetCloudWatchLogGroupArn(v string) *ModuleLoggingConfiguration { + s.CloudWatchLogGroupArn = &v + return s +} + +// SetEnabled sets the Enabled field's value. +func (s *ModuleLoggingConfiguration) SetEnabled(v bool) *ModuleLoggingConfiguration { + s.Enabled = &v + return s +} + +// SetLogLevel sets the LogLevel field's value. +func (s *ModuleLoggingConfiguration) SetLogLevel(v string) *ModuleLoggingConfiguration { + s.LogLevel = &v + return s +} + +// A JSON blob that provides configuration to use for logging with respect to +// the various Apache Airflow services: DagProcessingLogs, SchedulerLogs, TaskLogs, +// WebserverLogs, and WorkerLogs. +type ModuleLoggingConfigurationInput struct { + _ struct{} `type:"structure"` + + // Defines that the logging module is enabled. + // + // Enabled is a required field + Enabled *bool `type:"boolean" required:"true"` + + // Defines the log level, which can be CRITICAL, ERROR, WARNING, or INFO. + // + // LogLevel is a required field + LogLevel *string `type:"string" required:"true" enum:"LoggingLevel"` +} + +// String returns the string representation +func (s ModuleLoggingConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModuleLoggingConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ModuleLoggingConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModuleLoggingConfigurationInput"} + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + if s.LogLevel == nil { + invalidParams.Add(request.NewErrParamRequired("LogLevel")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEnabled sets the Enabled field's value. +func (s *ModuleLoggingConfigurationInput) SetEnabled(v bool) *ModuleLoggingConfigurationInput { + s.Enabled = &v + return s +} + +// SetLogLevel sets the LogLevel field's value. +func (s *ModuleLoggingConfigurationInput) SetLogLevel(v string) *ModuleLoggingConfigurationInput { + s.LogLevel = &v + return s +} + +// Provide the security group and subnet IDs for the workers and scheduler. +type NetworkConfiguration struct { + _ struct{} `type:"structure"` + + // A JSON list of 1 or more security groups IDs by name, in the same VPC as + // the subnets. + SecurityGroupIds []*string `min:"1" type:"list"` + + // Provide a JSON list of 2 subnet IDs by name. These must be private subnets, + // in the same VPC, in two different availability zones. + SubnetIds []*string `min:"2" type:"list"` +} + +// String returns the string representation +func (s NetworkConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *NetworkConfiguration) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "NetworkConfiguration"}
+	if s.SecurityGroupIds != nil && len(s.SecurityGroupIds) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("SecurityGroupIds", 1))
+	}
+	if s.SubnetIds != nil && len(s.SubnetIds) < 2 {
+		invalidParams.Add(request.NewErrParamMinLen("SubnetIds", 2))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetSecurityGroupIds sets the SecurityGroupIds field's value.
+func (s *NetworkConfiguration) SetSecurityGroupIds(v []*string) *NetworkConfiguration {
+	s.SecurityGroupIds = v
+	return s
+}
+
+// SetSubnetIds sets the SubnetIds field's value.
+func (s *NetworkConfiguration) SetSubnetIds(v []*string) *NetworkConfiguration {
+	s.SubnetIds = v
+	return s
+}
+
+type PublishMetricsInput struct {
+	_ struct{} `type:"structure"`
+
+	// Publishes environment metric data to Amazon CloudWatch.
+	//
+	// EnvironmentName is a required field
+	EnvironmentName *string `location:"uri" locationName:"EnvironmentName" min:"1" type:"string" required:"true"`
+
+	// Publishes metric data points to Amazon CloudWatch. CloudWatch associates
+	// the data points with the specified metric.
+	//
+	// MetricData is a required field
+	MetricData []*MetricDatum `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s PublishMetricsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PublishMetricsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PublishMetricsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PublishMetricsInput"} + if s.EnvironmentName == nil { + invalidParams.Add(request.NewErrParamRequired("EnvironmentName")) + } + if s.EnvironmentName != nil && len(*s.EnvironmentName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EnvironmentName", 1)) + } + if s.MetricData == nil { + invalidParams.Add(request.NewErrParamRequired("MetricData")) + } + if s.MetricData != nil { + for i, v := range s.MetricData { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "MetricData", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEnvironmentName sets the EnvironmentName field's value. +func (s *PublishMetricsInput) SetEnvironmentName(v string) *PublishMetricsInput { + s.EnvironmentName = &v + return s +} + +// SetMetricData sets the MetricData field's value. +func (s *PublishMetricsInput) SetMetricData(v []*MetricDatum) *PublishMetricsInput { + s.MetricData = v + return s +} + +type PublishMetricsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PublishMetricsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublishMetricsOutput) GoString() string { + return s.String() +} + +// ResourceNotFoundException: The resource is not available. 
+type ResourceNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ResourceNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceNotFoundException) GoString() string { + return s.String() +} + +func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { + return &ResourceNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ResourceNotFoundException) Code() string { + return "ResourceNotFoundException" +} + +// Message returns the exception's message. +func (s *ResourceNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceNotFoundException) OrigErr() error { + return nil +} + +func (s *ResourceNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Internal only API. +type StatisticSet struct { + _ struct{} `type:"structure"` + + // Internal only API. + Maximum *float64 `type:"double"` + + // Internal only API. + Minimum *float64 `type:"double"` + + // Internal only API. + SampleCount *int64 `type:"integer"` + + // Internal only API. 
+ Sum *float64 `type:"double"` +} + +// String returns the string representation +func (s StatisticSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StatisticSet) GoString() string { + return s.String() +} + +// SetMaximum sets the Maximum field's value. +func (s *StatisticSet) SetMaximum(v float64) *StatisticSet { + s.Maximum = &v + return s +} + +// SetMinimum sets the Minimum field's value. +func (s *StatisticSet) SetMinimum(v float64) *StatisticSet { + s.Minimum = &v + return s +} + +// SetSampleCount sets the SampleCount field's value. +func (s *StatisticSet) SetSampleCount(v int64) *StatisticSet { + s.SampleCount = &v + return s +} + +// SetSum sets the Sum field's value. +func (s *StatisticSet) SetSum(v float64) *StatisticSet { + s.Sum = &v + return s +} + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The tag resource ARN of the MWAA environments. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"ResourceArn" min:"1" type:"string" required:"true"` + + // The tag resource tag of the MWAA environments. + // + // Tags is a required field + Tags map[string]*string `min:"1" type:"map" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v map[string]*string) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The tag resource ARN of the MWAA environments. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"ResourceArn" min:"1" type:"string" required:"true"` + + // The tag resource key of the MWAA environments. 
+ // + // TagKeys is a required field + TagKeys []*string `location:"querystring" locationName:"tagKeys" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + +type UpdateEnvironmentInput struct { + _ struct{} `type:"structure"` + + // The Airflow Configuration Options to update of your Amazon MWAA environment. + AirflowConfigurationOptions map[string]*string `type:"map" sensitive:"true"` + + // The Airflow Version to update of your Amazon MWAA environment. 
+ AirflowVersion *string `min:"1" type:"string"` + + // The Dags folder S3 Path to update of your Amazon MWAA environment. + DagS3Path *string `min:"1" type:"string"` + + // The Environment Class to update of your Amazon MWAA environment. + EnvironmentClass *string `min:"1" type:"string"` + + // The Execution Role ARN to update of your Amazon MWAA environment. + ExecutionRoleArn *string `min:"1" type:"string"` + + // The Logging Configuration to update of your Amazon MWAA environment. + LoggingConfiguration *LoggingConfigurationInput `type:"structure"` + + // The Maximum Workers to update of your Amazon MWAA environment. + MaxWorkers *int64 `min:"1" type:"integer"` + + // The name of your Amazon MWAA environment that you wish to update. + // + // Name is a required field + Name *string `location:"uri" locationName:"Name" min:"1" type:"string" required:"true"` + + // The Network Configuration to update of your Amazon MWAA environment. + NetworkConfiguration *UpdateNetworkConfigurationInput `type:"structure"` + + // The Plugins.zip S3 Object Version to update of your Amazon MWAA environment. + PluginsS3ObjectVersion *string `min:"1" type:"string"` + + // The Plugins.zip S3 Path to update of your Amazon MWAA environment. + PluginsS3Path *string `min:"1" type:"string"` + + // The Requirements.txt S3 Object Version to update of your Amazon MWAA environment. + RequirementsS3ObjectVersion *string `min:"1" type:"string"` + + // The Requirements.txt S3 Path to update of your Amazon MWAA environment. + RequirementsS3Path *string `min:"1" type:"string"` + + // The S3 Source Bucket ARN to update of your Amazon MWAA environment. + SourceBucketArn *string `min:"1" type:"string"` + + // The Webserver Access Mode to update of your Amazon MWAA environment. + WebserverAccessMode *string `type:"string" enum:"WebserverAccessMode"` + + // The Weekly Maintenance Window Start to update of your Amazon MWAA environment. 
+ WeeklyMaintenanceWindowStart *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateEnvironmentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateEnvironmentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateEnvironmentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateEnvironmentInput"} + if s.AirflowVersion != nil && len(*s.AirflowVersion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AirflowVersion", 1)) + } + if s.DagS3Path != nil && len(*s.DagS3Path) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DagS3Path", 1)) + } + if s.EnvironmentClass != nil && len(*s.EnvironmentClass) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EnvironmentClass", 1)) + } + if s.ExecutionRoleArn != nil && len(*s.ExecutionRoleArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ExecutionRoleArn", 1)) + } + if s.MaxWorkers != nil && *s.MaxWorkers < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxWorkers", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.PluginsS3ObjectVersion != nil && len(*s.PluginsS3ObjectVersion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PluginsS3ObjectVersion", 1)) + } + if s.PluginsS3Path != nil && len(*s.PluginsS3Path) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PluginsS3Path", 1)) + } + if s.RequirementsS3ObjectVersion != nil && len(*s.RequirementsS3ObjectVersion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RequirementsS3ObjectVersion", 1)) + } + if s.RequirementsS3Path != nil && len(*s.RequirementsS3Path) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RequirementsS3Path", 1)) + } + if s.SourceBucketArn != nil 
&& len(*s.SourceBucketArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SourceBucketArn", 1)) + } + if s.WeeklyMaintenanceWindowStart != nil && len(*s.WeeklyMaintenanceWindowStart) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WeeklyMaintenanceWindowStart", 1)) + } + if s.LoggingConfiguration != nil { + if err := s.LoggingConfiguration.Validate(); err != nil { + invalidParams.AddNested("LoggingConfiguration", err.(request.ErrInvalidParams)) + } + } + if s.NetworkConfiguration != nil { + if err := s.NetworkConfiguration.Validate(); err != nil { + invalidParams.AddNested("NetworkConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAirflowConfigurationOptions sets the AirflowConfigurationOptions field's value. +func (s *UpdateEnvironmentInput) SetAirflowConfigurationOptions(v map[string]*string) *UpdateEnvironmentInput { + s.AirflowConfigurationOptions = v + return s +} + +// SetAirflowVersion sets the AirflowVersion field's value. +func (s *UpdateEnvironmentInput) SetAirflowVersion(v string) *UpdateEnvironmentInput { + s.AirflowVersion = &v + return s +} + +// SetDagS3Path sets the DagS3Path field's value. +func (s *UpdateEnvironmentInput) SetDagS3Path(v string) *UpdateEnvironmentInput { + s.DagS3Path = &v + return s +} + +// SetEnvironmentClass sets the EnvironmentClass field's value. +func (s *UpdateEnvironmentInput) SetEnvironmentClass(v string) *UpdateEnvironmentInput { + s.EnvironmentClass = &v + return s +} + +// SetExecutionRoleArn sets the ExecutionRoleArn field's value. +func (s *UpdateEnvironmentInput) SetExecutionRoleArn(v string) *UpdateEnvironmentInput { + s.ExecutionRoleArn = &v + return s +} + +// SetLoggingConfiguration sets the LoggingConfiguration field's value. 
+func (s *UpdateEnvironmentInput) SetLoggingConfiguration(v *LoggingConfigurationInput) *UpdateEnvironmentInput { + s.LoggingConfiguration = v + return s +} + +// SetMaxWorkers sets the MaxWorkers field's value. +func (s *UpdateEnvironmentInput) SetMaxWorkers(v int64) *UpdateEnvironmentInput { + s.MaxWorkers = &v + return s +} + +// SetName sets the Name field's value. +func (s *UpdateEnvironmentInput) SetName(v string) *UpdateEnvironmentInput { + s.Name = &v + return s +} + +// SetNetworkConfiguration sets the NetworkConfiguration field's value. +func (s *UpdateEnvironmentInput) SetNetworkConfiguration(v *UpdateNetworkConfigurationInput) *UpdateEnvironmentInput { + s.NetworkConfiguration = v + return s +} + +// SetPluginsS3ObjectVersion sets the PluginsS3ObjectVersion field's value. +func (s *UpdateEnvironmentInput) SetPluginsS3ObjectVersion(v string) *UpdateEnvironmentInput { + s.PluginsS3ObjectVersion = &v + return s +} + +// SetPluginsS3Path sets the PluginsS3Path field's value. +func (s *UpdateEnvironmentInput) SetPluginsS3Path(v string) *UpdateEnvironmentInput { + s.PluginsS3Path = &v + return s +} + +// SetRequirementsS3ObjectVersion sets the RequirementsS3ObjectVersion field's value. +func (s *UpdateEnvironmentInput) SetRequirementsS3ObjectVersion(v string) *UpdateEnvironmentInput { + s.RequirementsS3ObjectVersion = &v + return s +} + +// SetRequirementsS3Path sets the RequirementsS3Path field's value. +func (s *UpdateEnvironmentInput) SetRequirementsS3Path(v string) *UpdateEnvironmentInput { + s.RequirementsS3Path = &v + return s +} + +// SetSourceBucketArn sets the SourceBucketArn field's value. +func (s *UpdateEnvironmentInput) SetSourceBucketArn(v string) *UpdateEnvironmentInput { + s.SourceBucketArn = &v + return s +} + +// SetWebserverAccessMode sets the WebserverAccessMode field's value. 
+func (s *UpdateEnvironmentInput) SetWebserverAccessMode(v string) *UpdateEnvironmentInput { + s.WebserverAccessMode = &v + return s +} + +// SetWeeklyMaintenanceWindowStart sets the WeeklyMaintenanceWindowStart field's value. +func (s *UpdateEnvironmentInput) SetWeeklyMaintenanceWindowStart(v string) *UpdateEnvironmentInput { + s.WeeklyMaintenanceWindowStart = &v + return s +} + +type UpdateEnvironmentOutput struct { + _ struct{} `type:"structure"` + + // The ARN to update of your Amazon MWAA environment. + Arn *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateEnvironmentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateEnvironmentOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *UpdateEnvironmentOutput) SetArn(v string) *UpdateEnvironmentOutput { + s.Arn = &v + return s +} + +// Error information of update, if applicable. +type UpdateError struct { + _ struct{} `type:"structure"` + + // Error code of update. + ErrorCode *string `type:"string"` + + // Error message of update. + ErrorMessage *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateError) GoString() string { + return s.String() +} + +// SetErrorCode sets the ErrorCode field's value. +func (s *UpdateError) SetErrorCode(v string) *UpdateError { + s.ErrorCode = &v + return s +} + +// SetErrorMessage sets the ErrorMessage field's value. +func (s *UpdateError) SetErrorMessage(v string) *UpdateError { + s.ErrorMessage = &v + return s +} + +// Provide the security group and subnet IDs for the workers and scheduler. 
+type UpdateNetworkConfigurationInput struct { + _ struct{} `type:"structure"` + + // Provide a JSON list of 1 or more security groups IDs by name, in the same + // VPC as the subnets. + // + // SecurityGroupIds is a required field + SecurityGroupIds []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s UpdateNetworkConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateNetworkConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateNetworkConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateNetworkConfigurationInput"} + if s.SecurityGroupIds == nil { + invalidParams.Add(request.NewErrParamRequired("SecurityGroupIds")) + } + if s.SecurityGroupIds != nil && len(s.SecurityGroupIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecurityGroupIds", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *UpdateNetworkConfigurationInput) SetSecurityGroupIds(v []*string) *UpdateNetworkConfigurationInput { + s.SecurityGroupIds = v + return s +} + +// ValidationException: The provided input is not valid. 
+type ValidationException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ValidationException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ValidationException) GoString() string { + return s.String() +} + +func newErrorValidationException(v protocol.ResponseMetadata) error { + return &ValidationException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ValidationException) Code() string { + return "ValidationException" +} + +// Message returns the exception's message. +func (s *ValidationException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ValidationException) OrigErr() error { + return nil +} + +func (s *ValidationException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ValidationException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *ValidationException) RequestID() string { + return s.RespMetadata.RequestID +} + +const ( + // EnvironmentStatusCreating is a EnvironmentStatus enum value + EnvironmentStatusCreating = "CREATING" + + // EnvironmentStatusCreateFailed is a EnvironmentStatus enum value + EnvironmentStatusCreateFailed = "CREATE_FAILED" + + // EnvironmentStatusAvailable is a EnvironmentStatus enum value + EnvironmentStatusAvailable = "AVAILABLE" + + // EnvironmentStatusUpdating is a EnvironmentStatus enum value + EnvironmentStatusUpdating = "UPDATING" + + // EnvironmentStatusDeleting is a EnvironmentStatus enum value + EnvironmentStatusDeleting = "DELETING" + + // EnvironmentStatusDeleted is a EnvironmentStatus enum value + EnvironmentStatusDeleted = "DELETED" +) + +// EnvironmentStatus_Values returns all elements of the EnvironmentStatus enum +func EnvironmentStatus_Values() []string { + return []string{ + EnvironmentStatusCreating, + EnvironmentStatusCreateFailed, + EnvironmentStatusAvailable, + EnvironmentStatusUpdating, + EnvironmentStatusDeleting, + EnvironmentStatusDeleted, + } +} + +const ( + // LoggingLevelCritical is a LoggingLevel enum value + LoggingLevelCritical = "CRITICAL" + + // LoggingLevelError is a LoggingLevel enum value + LoggingLevelError = "ERROR" + + // LoggingLevelWarning is a LoggingLevel enum value + LoggingLevelWarning = "WARNING" + + // LoggingLevelInfo is a LoggingLevel enum value + LoggingLevelInfo = "INFO" + + // LoggingLevelDebug is a LoggingLevel enum value + LoggingLevelDebug = "DEBUG" +) + +// LoggingLevel_Values returns all elements of the LoggingLevel enum +func LoggingLevel_Values() []string { + return []string{ + LoggingLevelCritical, + LoggingLevelError, + LoggingLevelWarning, + LoggingLevelInfo, + LoggingLevelDebug, + } +} + +// Unit +const ( + // UnitSeconds is a Unit enum value + UnitSeconds = "Seconds" + + // UnitMicroseconds is a Unit enum value + UnitMicroseconds = "Microseconds" + + // UnitMilliseconds is a Unit enum value + 
UnitMilliseconds = "Milliseconds" + + // UnitBytes is a Unit enum value + UnitBytes = "Bytes" + + // UnitKilobytes is a Unit enum value + UnitKilobytes = "Kilobytes" + + // UnitMegabytes is a Unit enum value + UnitMegabytes = "Megabytes" + + // UnitGigabytes is a Unit enum value + UnitGigabytes = "Gigabytes" + + // UnitTerabytes is a Unit enum value + UnitTerabytes = "Terabytes" + + // UnitBits is a Unit enum value + UnitBits = "Bits" + + // UnitKilobits is a Unit enum value + UnitKilobits = "Kilobits" + + // UnitMegabits is a Unit enum value + UnitMegabits = "Megabits" + + // UnitGigabits is a Unit enum value + UnitGigabits = "Gigabits" + + // UnitTerabits is a Unit enum value + UnitTerabits = "Terabits" + + // UnitPercent is a Unit enum value + UnitPercent = "Percent" + + // UnitCount is a Unit enum value + UnitCount = "Count" + + // UnitBytesSecond is a Unit enum value + UnitBytesSecond = "Bytes/Second" + + // UnitKilobytesSecond is a Unit enum value + UnitKilobytesSecond = "Kilobytes/Second" + + // UnitMegabytesSecond is a Unit enum value + UnitMegabytesSecond = "Megabytes/Second" + + // UnitGigabytesSecond is a Unit enum value + UnitGigabytesSecond = "Gigabytes/Second" + + // UnitTerabytesSecond is a Unit enum value + UnitTerabytesSecond = "Terabytes/Second" + + // UnitBitsSecond is a Unit enum value + UnitBitsSecond = "Bits/Second" + + // UnitKilobitsSecond is a Unit enum value + UnitKilobitsSecond = "Kilobits/Second" + + // UnitMegabitsSecond is a Unit enum value + UnitMegabitsSecond = "Megabits/Second" + + // UnitGigabitsSecond is a Unit enum value + UnitGigabitsSecond = "Gigabits/Second" + + // UnitTerabitsSecond is a Unit enum value + UnitTerabitsSecond = "Terabits/Second" + + // UnitCountSecond is a Unit enum value + UnitCountSecond = "Count/Second" + + // UnitNone is a Unit enum value + UnitNone = "None" +) + +// Unit_Values returns all elements of the Unit enum +func Unit_Values() []string { + return []string{ + UnitSeconds, + UnitMicroseconds, + 
UnitMilliseconds, + UnitBytes, + UnitKilobytes, + UnitMegabytes, + UnitGigabytes, + UnitTerabytes, + UnitBits, + UnitKilobits, + UnitMegabits, + UnitGigabits, + UnitTerabits, + UnitPercent, + UnitCount, + UnitBytesSecond, + UnitKilobytesSecond, + UnitMegabytesSecond, + UnitGigabytesSecond, + UnitTerabytesSecond, + UnitBitsSecond, + UnitKilobitsSecond, + UnitMegabitsSecond, + UnitGigabitsSecond, + UnitTerabitsSecond, + UnitCountSecond, + UnitNone, + } +} + +const ( + // UpdateStatusSuccess is a UpdateStatus enum value + UpdateStatusSuccess = "SUCCESS" + + // UpdateStatusPending is a UpdateStatus enum value + UpdateStatusPending = "PENDING" + + // UpdateStatusFailed is a UpdateStatus enum value + UpdateStatusFailed = "FAILED" +) + +// UpdateStatus_Values returns all elements of the UpdateStatus enum +func UpdateStatus_Values() []string { + return []string{ + UpdateStatusSuccess, + UpdateStatusPending, + UpdateStatusFailed, + } +} + +const ( + // WebserverAccessModePrivateOnly is a WebserverAccessMode enum value + WebserverAccessModePrivateOnly = "PRIVATE_ONLY" + + // WebserverAccessModePublicOnly is a WebserverAccessMode enum value + WebserverAccessModePublicOnly = "PUBLIC_ONLY" +) + +// WebserverAccessMode_Values returns all elements of the WebserverAccessMode enum +func WebserverAccessMode_Values() []string { + return []string{ + WebserverAccessModePrivateOnly, + WebserverAccessModePublicOnly, + } +} diff --git a/service/mwaa/doc.go b/service/mwaa/doc.go new file mode 100644 index 00000000000..8d253388d91 --- /dev/null +++ b/service/mwaa/doc.go @@ -0,0 +1,30 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package mwaa provides the client and types for making API +// requests to AmazonMWAA. +// +// This section contains the Amazon Managed Workflows for Apache Airflow (MWAA) +// API reference documentation. For more information, see What Is Amazon MWAA? +// (https://docs.aws.amazon.com/mwaa/latest/userguide/what-is-mwaa.html). 
+// +// See https://docs.aws.amazon.com/goto/WebAPI/mwaa-2020-07-01 for more information on this service. +// +// See mwaa package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/mwaa/ +// +// Using the Client +// +// To contact AmazonMWAA with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AmazonMWAA client MWAA for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/mwaa/#New +package mwaa diff --git a/service/mwaa/errors.go b/service/mwaa/errors.go new file mode 100644 index 00000000000..e6b76b7db88 --- /dev/null +++ b/service/mwaa/errors.go @@ -0,0 +1,42 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package mwaa + +import ( + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + + // ErrCodeAccessDeniedException for service response error code + // "AccessDeniedException". + // + // Access to the Airflow Web UI or CLI has been Denied. Please follow the MWAA + // user guide to setup permissions to access the Web UI and CLI functionality. + ErrCodeAccessDeniedException = "AccessDeniedException" + + // ErrCodeInternalServerException for service response error code + // "InternalServerException". + // + // InternalServerException: An internal error has occurred. + ErrCodeInternalServerException = "InternalServerException" + + // ErrCodeResourceNotFoundException for service response error code + // "ResourceNotFoundException". + // + // ResourceNotFoundException: The resource is not available. 
+ ErrCodeResourceNotFoundException = "ResourceNotFoundException" + + // ErrCodeValidationException for service response error code + // "ValidationException". + // + // ValidationException: The provided input is not valid. + ErrCodeValidationException = "ValidationException" +) + +var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ + "AccessDeniedException": newErrorAccessDeniedException, + "InternalServerException": newErrorInternalServerException, + "ResourceNotFoundException": newErrorResourceNotFoundException, + "ValidationException": newErrorValidationException, +} diff --git a/service/mwaa/mwaaiface/interface.go b/service/mwaa/mwaaiface/interface.go new file mode 100644 index 00000000000..312becafcbc --- /dev/null +++ b/service/mwaa/mwaaiface/interface.go @@ -0,0 +1,111 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package mwaaiface provides an interface to enable mocking the AmazonMWAA service client +// for testing your code. +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. +package mwaaiface + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/mwaa" +) + +// MWAAAPI provides an interface to enable mocking the +// mwaa.MWAA service client's API operation, +// paginators, and waiters. This make unit testing your code that calls out +// to the SDK's service client's calls easier. +// +// The best way to use this interface is so the SDK's service client's calls +// can be stubbed out for unit testing your code with the SDK without needing +// to inject custom request handlers into the SDK's request pipeline. +// +// // myFunc uses an SDK service client to make a request to +// // AmazonMWAA. 
+// func myFunc(svc mwaaiface.MWAAAPI) bool { +// // Make svc.CreateCliToken request +// } +// +// func main() { +// sess := session.New() +// svc := mwaa.New(sess) +// +// myFunc(svc) +// } +// +// In your _test.go file: +// +// // Define a mock struct to be used in your unit tests of myFunc. +// type mockMWAAClient struct { +// mwaaiface.MWAAAPI +// } +// func (m *mockMWAAClient) CreateCliToken(input *mwaa.CreateCliTokenInput) (*mwaa.CreateCliTokenOutput, error) { +// // mock response/functionality +// } +// +// func TestMyFunc(t *testing.T) { +// // Setup Test +// mockSvc := &mockMWAAClient{} +// +// myfunc(mockSvc) +// +// // Verify myFunc's functionality +// } +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. Its suggested to use the pattern above for testing, or using +// tooling to generate mocks to satisfy the interfaces. +type MWAAAPI interface { + CreateCliToken(*mwaa.CreateCliTokenInput) (*mwaa.CreateCliTokenOutput, error) + CreateCliTokenWithContext(aws.Context, *mwaa.CreateCliTokenInput, ...request.Option) (*mwaa.CreateCliTokenOutput, error) + CreateCliTokenRequest(*mwaa.CreateCliTokenInput) (*request.Request, *mwaa.CreateCliTokenOutput) + + CreateEnvironment(*mwaa.CreateEnvironmentInput) (*mwaa.CreateEnvironmentOutput, error) + CreateEnvironmentWithContext(aws.Context, *mwaa.CreateEnvironmentInput, ...request.Option) (*mwaa.CreateEnvironmentOutput, error) + CreateEnvironmentRequest(*mwaa.CreateEnvironmentInput) (*request.Request, *mwaa.CreateEnvironmentOutput) + + CreateWebLoginToken(*mwaa.CreateWebLoginTokenInput) (*mwaa.CreateWebLoginTokenOutput, error) + CreateWebLoginTokenWithContext(aws.Context, *mwaa.CreateWebLoginTokenInput, ...request.Option) (*mwaa.CreateWebLoginTokenOutput, error) + CreateWebLoginTokenRequest(*mwaa.CreateWebLoginTokenInput) (*request.Request, *mwaa.CreateWebLoginTokenOutput) + + 
DeleteEnvironment(*mwaa.DeleteEnvironmentInput) (*mwaa.DeleteEnvironmentOutput, error) + DeleteEnvironmentWithContext(aws.Context, *mwaa.DeleteEnvironmentInput, ...request.Option) (*mwaa.DeleteEnvironmentOutput, error) + DeleteEnvironmentRequest(*mwaa.DeleteEnvironmentInput) (*request.Request, *mwaa.DeleteEnvironmentOutput) + + GetEnvironment(*mwaa.GetEnvironmentInput) (*mwaa.GetEnvironmentOutput, error) + GetEnvironmentWithContext(aws.Context, *mwaa.GetEnvironmentInput, ...request.Option) (*mwaa.GetEnvironmentOutput, error) + GetEnvironmentRequest(*mwaa.GetEnvironmentInput) (*request.Request, *mwaa.GetEnvironmentOutput) + + ListEnvironments(*mwaa.ListEnvironmentsInput) (*mwaa.ListEnvironmentsOutput, error) + ListEnvironmentsWithContext(aws.Context, *mwaa.ListEnvironmentsInput, ...request.Option) (*mwaa.ListEnvironmentsOutput, error) + ListEnvironmentsRequest(*mwaa.ListEnvironmentsInput) (*request.Request, *mwaa.ListEnvironmentsOutput) + + ListEnvironmentsPages(*mwaa.ListEnvironmentsInput, func(*mwaa.ListEnvironmentsOutput, bool) bool) error + ListEnvironmentsPagesWithContext(aws.Context, *mwaa.ListEnvironmentsInput, func(*mwaa.ListEnvironmentsOutput, bool) bool, ...request.Option) error + + ListTagsForResource(*mwaa.ListTagsForResourceInput) (*mwaa.ListTagsForResourceOutput, error) + ListTagsForResourceWithContext(aws.Context, *mwaa.ListTagsForResourceInput, ...request.Option) (*mwaa.ListTagsForResourceOutput, error) + ListTagsForResourceRequest(*mwaa.ListTagsForResourceInput) (*request.Request, *mwaa.ListTagsForResourceOutput) + + PublishMetrics(*mwaa.PublishMetricsInput) (*mwaa.PublishMetricsOutput, error) + PublishMetricsWithContext(aws.Context, *mwaa.PublishMetricsInput, ...request.Option) (*mwaa.PublishMetricsOutput, error) + PublishMetricsRequest(*mwaa.PublishMetricsInput) (*request.Request, *mwaa.PublishMetricsOutput) + + TagResource(*mwaa.TagResourceInput) (*mwaa.TagResourceOutput, error) + TagResourceWithContext(aws.Context, *mwaa.TagResourceInput, 
...request.Option) (*mwaa.TagResourceOutput, error) + TagResourceRequest(*mwaa.TagResourceInput) (*request.Request, *mwaa.TagResourceOutput) + + UntagResource(*mwaa.UntagResourceInput) (*mwaa.UntagResourceOutput, error) + UntagResourceWithContext(aws.Context, *mwaa.UntagResourceInput, ...request.Option) (*mwaa.UntagResourceOutput, error) + UntagResourceRequest(*mwaa.UntagResourceInput) (*request.Request, *mwaa.UntagResourceOutput) + + UpdateEnvironment(*mwaa.UpdateEnvironmentInput) (*mwaa.UpdateEnvironmentOutput, error) + UpdateEnvironmentWithContext(aws.Context, *mwaa.UpdateEnvironmentInput, ...request.Option) (*mwaa.UpdateEnvironmentOutput, error) + UpdateEnvironmentRequest(*mwaa.UpdateEnvironmentInput) (*request.Request, *mwaa.UpdateEnvironmentOutput) +} + +var _ MWAAAPI = (*mwaa.MWAA)(nil) diff --git a/service/mwaa/service.go b/service/mwaa/service.go new file mode 100644 index 00000000000..df52d4b2051 --- /dev/null +++ b/service/mwaa/service.go @@ -0,0 +1,104 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package mwaa + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +// MWAA provides the API operation methods for making requests to +// AmazonMWAA. See this package's package overview docs +// for details on the service. +// +// MWAA methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. +type MWAA struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "MWAA" // Name of service. 
+ EndpointsID = "airflow" // ID to lookup a service endpoint with. + ServiceID = "MWAA" // ServiceID is a unique identifier of a specific service. +) + +// New creates a new instance of the MWAA client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// mySession := session.Must(session.NewSession()) +// +// // Create a MWAA client from just a session. +// svc := mwaa.New(mySession) +// +// // Create a MWAA client with additional configuration +// svc := mwaa.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *MWAA { + c := p.ClientConfig(EndpointsID, cfgs...) + if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = "airflow" + } + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *MWAA { + svc := &MWAA{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2020-07-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed( + protocol.NewUnmarshalErrorHandler(restjson.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(), + ) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a MWAA operation and runs any +// custom request initialization. +func (c *MWAA) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/service/quicksight/api.go b/service/quicksight/api.go index 6daa1b83120..408ddf729f0 100644 --- a/service/quicksight/api.go +++ b/service/quicksight/api.go @@ -5357,6 +5357,17 @@ func (c *QuickSight) GetDashboardEmbedUrlRequest(input *GetDashboardEmbedUrlInpu // Amazon QuickSight currently has Standard Edition and Enterprise Edition. // Not every operation and capability is available in every edition. // +// * UnsupportedPricingPlanException +// This error indicates that you are calling an embedding operation in Amazon +// QuickSight without the required pricing plan on your AWS account. 
Before +// you can use anonymous embedding, a QuickSight administrator needs to add +// capacity pricing to QuickSight. You can do this on the Manage QuickSight +// page. +// +// After capacity pricing is added, you can enable anonymous embedding by using +// the GetDashboardEmbedUrl API operation with the --identity-type ANONYMOUS +// option. +// // * InternalFailureException // An internal failure occurred. // @@ -21295,9 +21306,17 @@ func (s *GeoSpatialColumnGroup) SetName(v string) *GeoSpatialColumnGroup { return s } +// Parameter input for the GetDashboardEmbedUrl operation. type GetDashboardEmbedUrlInput struct { _ struct{} `type:"structure"` + // A list of one or more dashboard ids that you want to add to a session that + // includes anonymous authorizations. IdentityType must be set to ANONYMOUS + // for this to work, because other other identity types authenticate as QuickSight + // users. For example, if you set "--dashboard-id dash_id1 --dashboard-id dash_id2 + // dash_id3 identity-type ANONYMOUS", the session can access all three dashboards. + AdditionalDashboardIds []*string `location:"querystring" locationName:"additional-dashboard-ids" min:"1" type:"list"` + // The ID for the AWS account that contains the dashboard that you're embedding. // // AwsAccountId is a required field @@ -21311,7 +21330,11 @@ type GetDashboardEmbedUrlInput struct { // The authentication method that the user uses to sign in. // // IdentityType is a required field - IdentityType *string `location:"querystring" locationName:"creds-type" type:"string" required:"true" enum:"IdentityType"` + IdentityType *string `location:"querystring" locationName:"creds-type" type:"string" required:"true" enum:"EmbeddingIdentityType"` + + // The QuickSight namespace that contains the dashboard IDs in this request. + // If you're not using a custom namespace, set this to "default". 
+ Namespace *string `location:"querystring" locationName:"namespace" type:"string"` // Remove the reset button on the embedded dashboard. The default is FALSE, // which enables the reset button. @@ -21363,6 +21386,9 @@ func (s GetDashboardEmbedUrlInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *GetDashboardEmbedUrlInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "GetDashboardEmbedUrlInput"} + if s.AdditionalDashboardIds != nil && len(s.AdditionalDashboardIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AdditionalDashboardIds", 1)) + } if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } @@ -21388,6 +21414,12 @@ func (s *GetDashboardEmbedUrlInput) Validate() error { return nil } +// SetAdditionalDashboardIds sets the AdditionalDashboardIds field's value. +func (s *GetDashboardEmbedUrlInput) SetAdditionalDashboardIds(v []*string) *GetDashboardEmbedUrlInput { + s.AdditionalDashboardIds = v + return s +} + // SetAwsAccountId sets the AwsAccountId field's value. func (s *GetDashboardEmbedUrlInput) SetAwsAccountId(v string) *GetDashboardEmbedUrlInput { s.AwsAccountId = &v @@ -21406,6 +21438,12 @@ func (s *GetDashboardEmbedUrlInput) SetIdentityType(v string) *GetDashboardEmbed return s } +// SetNamespace sets the Namespace field's value. +func (s *GetDashboardEmbedUrlInput) SetNamespace(v string) *GetDashboardEmbedUrlInput { + s.Namespace = &v + return s +} + // SetResetDisabled sets the ResetDisabled field's value. func (s *GetDashboardEmbedUrlInput) SetResetDisabled(v bool) *GetDashboardEmbedUrlInput { s.ResetDisabled = &v @@ -21436,6 +21474,7 @@ func (s *GetDashboardEmbedUrlInput) SetUserArn(v string) *GetDashboardEmbedUrlIn return s } +// Output returned from the GetDashboardEmbedUrl operation. 
type GetDashboardEmbedUrlOutput struct { _ struct{} `type:"structure"` @@ -29633,6 +29672,73 @@ func (s *UIColorPalette) SetWarningForeground(v string) *UIColorPalette { return s } +// This error indicates that you are calling an embedding operation in Amazon +// QuickSight without the required pricing plan on your AWS account. Before +// you can use anonymous embedding, a QuickSight administrator needs to add +// capacity pricing to QuickSight. You can do this on the Manage QuickSight +// page. +// +// After capacity pricing is added, you can enable anonymous embedding by using +// the GetDashboardEmbedUrl API operation with the --identity-type ANONYMOUS +// option. +type UnsupportedPricingPlanException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` + + // The AWS request ID for this request. + RequestId *string `type:"string"` +} + +// String returns the string representation +func (s UnsupportedPricingPlanException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnsupportedPricingPlanException) GoString() string { + return s.String() +} + +func newErrorUnsupportedPricingPlanException(v protocol.ResponseMetadata) error { + return &UnsupportedPricingPlanException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *UnsupportedPricingPlanException) Code() string { + return "UnsupportedPricingPlanException" +} + +// Message returns the exception's message. +func (s *UnsupportedPricingPlanException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
+func (s *UnsupportedPricingPlanException) OrigErr() error { + return nil +} + +func (s *UnsupportedPricingPlanException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *UnsupportedPricingPlanException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *UnsupportedPricingPlanException) RequestID() string { + return s.RespMetadata.RequestID +} + // This error indicates that you are calling an operation on an Amazon QuickSight // subscription where the edition doesn't include support for that operation. // Amazon QuickSight currently has Standard Edition and Enterprise Edition. @@ -33766,6 +33872,26 @@ func Edition_Values() []string { } } +const ( + // EmbeddingIdentityTypeIam is a EmbeddingIdentityType enum value + EmbeddingIdentityTypeIam = "IAM" + + // EmbeddingIdentityTypeQuicksight is a EmbeddingIdentityType enum value + EmbeddingIdentityTypeQuicksight = "QUICKSIGHT" + + // EmbeddingIdentityTypeAnonymous is a EmbeddingIdentityType enum value + EmbeddingIdentityTypeAnonymous = "ANONYMOUS" +) + +// EmbeddingIdentityType_Values returns all elements of the EmbeddingIdentityType enum +func EmbeddingIdentityType_Values() []string { + return []string{ + EmbeddingIdentityTypeIam, + EmbeddingIdentityTypeQuicksight, + EmbeddingIdentityTypeAnonymous, + } +} + const ( // ExceptionResourceTypeUser is a ExceptionResourceType enum value ExceptionResourceTypeUser = "USER" diff --git a/service/quicksight/errors.go b/service/quicksight/errors.go index 4f8ac36a9ee..34fe15ec6cf 100644 --- a/service/quicksight/errors.go +++ b/service/quicksight/errors.go @@ -106,6 +106,20 @@ const ( // Access is throttled. 
ErrCodeThrottlingException = "ThrottlingException" + // ErrCodeUnsupportedPricingPlanException for service response error code + // "UnsupportedPricingPlanException". + // + // This error indicates that you are calling an embedding operation in Amazon + // QuickSight without the required pricing plan on your AWS account. Before + // you can use anonymous embedding, a QuickSight administrator needs to add + // capacity pricing to QuickSight. You can do this on the Manage QuickSight + // page. + // + // After capacity pricing is added, you can enable anonymous embedding by using + // the GetDashboardEmbedUrl API operation with the --identity-type ANONYMOUS + // option. + ErrCodeUnsupportedPricingPlanException = "UnsupportedPricingPlanException" + // ErrCodeUnsupportedUserEditionException for service response error code // "UnsupportedUserEditionException". // @@ -140,6 +154,7 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "ResourceUnavailableException": newErrorResourceUnavailableException, "SessionLifetimeInMinutesInvalidException": newErrorSessionLifetimeInMinutesInvalidException, "ThrottlingException": newErrorThrottlingException, + "UnsupportedPricingPlanException": newErrorUnsupportedPricingPlanException, "UnsupportedUserEditionException": newErrorUnsupportedUserEditionException, "QuickSightUserNotFoundException": newErrorUserNotFoundException, } diff --git a/service/sfn/api.go b/service/sfn/api.go index c82f0daa04f..a13ed786d19 100644 --- a/service/sfn/api.go +++ b/service/sfn/api.go @@ -1914,6 +1914,101 @@ func (c *SFN) StartExecutionWithContext(ctx aws.Context, input *StartExecutionIn return out, req.Send() } +const opStartSyncExecution = "StartSyncExecution" + +// StartSyncExecutionRequest generates a "aws/request.Request" representing the +// client's request for the StartSyncExecution operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartSyncExecution for more information on using the StartSyncExecution +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartSyncExecutionRequest method. +// req, resp := client.StartSyncExecutionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/states-2016-11-23/StartSyncExecution +func (c *SFN) StartSyncExecutionRequest(input *StartSyncExecutionInput) (req *request.Request, output *StartSyncExecutionOutput) { + op := &request.Operation{ + Name: opStartSyncExecution, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartSyncExecutionInput{} + } + + output = &StartSyncExecutionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("sync-", nil)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// StartSyncExecution API operation for AWS Step Functions. +// +// Starts a Synchronous Express state machine execution. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Step Functions's +// API operation StartSyncExecution for usage and error information. +// +// Returned Error Types: +// * InvalidArn +// The provided Amazon Resource Name (ARN) is invalid. +// +// * InvalidExecutionInput +// The provided JSON input data is invalid. 
+// +// * InvalidName +// The provided name is invalid. +// +// * StateMachineDoesNotExist +// The specified state machine does not exist. +// +// * StateMachineDeleting +// The specified state machine is being deleted. +// +// * StateMachineTypeNotSupported +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/states-2016-11-23/StartSyncExecution +func (c *SFN) StartSyncExecution(input *StartSyncExecutionInput) (*StartSyncExecutionOutput, error) { + req, out := c.StartSyncExecutionRequest(input) + return out, req.Send() +} + +// StartSyncExecutionWithContext is the same as StartSyncExecution with the addition of +// the ability to pass a context and additional request options. +// +// See StartSyncExecution for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SFN) StartSyncExecutionWithContext(ctx aws.Context, input *StartSyncExecutionInput, opts ...request.Option) (*StartSyncExecutionOutput, error) { + req, out := c.StartSyncExecutionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opStopExecution = "StopExecution" // StopExecutionRequest generates a "aws/request.Request" representing the @@ -2740,6 +2835,39 @@ func (s *ActivityWorkerLimitExceeded) RequestID() string { return s.RespMetadata.RequestID } +// An object that describes workflow billing details. +type BillingDetails struct { + _ struct{} `type:"structure"` + + // Billed duration of your workflow, in milliseconds. + BilledDurationInMilliseconds *int64 `locationName:"billedDurationInMilliseconds" type:"long"` + + // Billed memory consumption of your workflow, in MB. 
+ BilledMemoryUsedInMB *int64 `locationName:"billedMemoryUsedInMB" type:"long"` +} + +// String returns the string representation +func (s BillingDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BillingDetails) GoString() string { + return s.String() +} + +// SetBilledDurationInMilliseconds sets the BilledDurationInMilliseconds field's value. +func (s *BillingDetails) SetBilledDurationInMilliseconds(v int64) *BillingDetails { + s.BilledDurationInMilliseconds = &v + return s +} + +// SetBilledMemoryUsedInMB sets the BilledMemoryUsedInMB field's value. +func (s *BillingDetails) SetBilledMemoryUsedInMB(v int64) *BillingDetails { + s.BilledMemoryUsedInMB = &v + return s +} + // Provides details about execution input or output. type CloudWatchEventsExecutionDataDetails struct { _ struct{} `type:"structure"` @@ -3373,7 +3501,7 @@ func (s *DescribeExecutionInput) SetExecutionArn(v string) *DescribeExecutionInp type DescribeExecutionOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) that id entifies the execution. + // The Amazon Resource Name (ARN) that identifies the execution. // // ExecutionArn is a required field ExecutionArn *string `locationName:"executionArn" min:"1" type:"string" required:"true"` @@ -3431,7 +3559,7 @@ type DescribeExecutionOutput struct { // If the execution has already ended, the date the execution stopped. StopDate *time.Time `locationName:"stopDate" type:"timestamp"` - // The AWS X-Ray trace header which was passed to the execution. + // The AWS X-Ray trace header that was passed to the execution. TraceHeader *string `locationName:"traceHeader" type:"string"` } @@ -4050,7 +4178,7 @@ func (s *ExecutionLimitExceeded) RequestID() string { type ExecutionListItem struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) that id entifies the execution. + // The Amazon Resource Name (ARN) that identifies the execution. 
// // ExecutionArn is a required field ExecutionArn *string `locationName:"executionArn" min:"1" type:"string" required:"true"` @@ -6462,7 +6590,7 @@ func (s *StartExecutionInput) SetTraceHeader(v string) *StartExecutionInput { type StartExecutionOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) that id entifies the execution. + // The Amazon Resource Name (ARN) that identifies the execution. // // ExecutionArn is a required field ExecutionArn *string `locationName:"executionArn" min:"1" type:"string" required:"true"` @@ -6495,6 +6623,240 @@ func (s *StartExecutionOutput) SetStartDate(v time.Time) *StartExecutionOutput { return s } +type StartSyncExecutionInput struct { + _ struct{} `type:"structure"` + + // The string that contains the JSON input data for the execution, for example: + // + // "input": "{\"first_name\" : \"test\"}" + // + // If you don't include any JSON input data, you still must include the two + // braces, for example: "input": "{}" + // + // Length constraints apply to the payload size, and are expressed as bytes + // in UTF-8 encoding. + Input *string `locationName:"input" type:"string" sensitive:"true"` + + // The name of the execution. + Name *string `locationName:"name" min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the state machine to execute. + // + // StateMachineArn is a required field + StateMachineArn *string `locationName:"stateMachineArn" min:"1" type:"string" required:"true"` + + // Passes the AWS X-Ray trace header. The trace header can also be passed in + // the request payload. + TraceHeader *string `locationName:"traceHeader" type:"string"` +} + +// String returns the string representation +func (s StartSyncExecutionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartSyncExecutionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *StartSyncExecutionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartSyncExecutionInput"} + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.StateMachineArn == nil { + invalidParams.Add(request.NewErrParamRequired("StateMachineArn")) + } + if s.StateMachineArn != nil && len(*s.StateMachineArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StateMachineArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInput sets the Input field's value. +func (s *StartSyncExecutionInput) SetInput(v string) *StartSyncExecutionInput { + s.Input = &v + return s +} + +// SetName sets the Name field's value. +func (s *StartSyncExecutionInput) SetName(v string) *StartSyncExecutionInput { + s.Name = &v + return s +} + +// SetStateMachineArn sets the StateMachineArn field's value. +func (s *StartSyncExecutionInput) SetStateMachineArn(v string) *StartSyncExecutionInput { + s.StateMachineArn = &v + return s +} + +// SetTraceHeader sets the TraceHeader field's value. +func (s *StartSyncExecutionInput) SetTraceHeader(v string) *StartSyncExecutionInput { + s.TraceHeader = &v + return s +} + +type StartSyncExecutionOutput struct { + _ struct{} `type:"structure"` + + // An object that describes workflow billing details, including billed duration + // and memory use. + BillingDetails *BillingDetails `locationName:"billingDetails" type:"structure"` + + // A more detailed explanation of the cause of the failure. + Cause *string `locationName:"cause" type:"string" sensitive:"true"` + + // The error code of the failure. + Error *string `locationName:"error" type:"string" sensitive:"true"` + + // The Amazon Resource Name (ARN) that identifies the execution. 
+ // + // ExecutionArn is a required field + ExecutionArn *string `locationName:"executionArn" min:"1" type:"string" required:"true"` + + // The string that contains the JSON input data of the execution. Length constraints + // apply to the payload size, and are expressed as bytes in UTF-8 encoding. + Input *string `locationName:"input" type:"string" sensitive:"true"` + + // Provides details about execution input or output. + InputDetails *CloudWatchEventsExecutionDataDetails `locationName:"inputDetails" type:"structure"` + + // The name of the execution. + Name *string `locationName:"name" min:"1" type:"string"` + + // The JSON output data of the execution. Length constraints apply to the payload + // size, and are expressed as bytes in UTF-8 encoding. + // + // This field is set only if the execution succeeds. If the execution fails, + // this field is null. + Output *string `locationName:"output" type:"string" sensitive:"true"` + + // Provides details about execution input or output. + OutputDetails *CloudWatchEventsExecutionDataDetails `locationName:"outputDetails" type:"structure"` + + // The date the execution is started. + // + // StartDate is a required field + StartDate *time.Time `locationName:"startDate" type:"timestamp" required:"true"` + + // The Amazon Resource Name (ARN) that identifies the state machine. + StateMachineArn *string `locationName:"stateMachineArn" min:"1" type:"string"` + + // The current status of the execution. + // + // Status is a required field + Status *string `locationName:"status" type:"string" required:"true" enum:"SyncExecutionStatus"` + + // If the execution has already ended, the date the execution stopped. + // + // StopDate is a required field + StopDate *time.Time `locationName:"stopDate" type:"timestamp" required:"true"` + + // The AWS X-Ray trace header that was passed to the execution. 
+ TraceHeader *string `locationName:"traceHeader" type:"string"` +} + +// String returns the string representation +func (s StartSyncExecutionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartSyncExecutionOutput) GoString() string { + return s.String() +} + +// SetBillingDetails sets the BillingDetails field's value. +func (s *StartSyncExecutionOutput) SetBillingDetails(v *BillingDetails) *StartSyncExecutionOutput { + s.BillingDetails = v + return s +} + +// SetCause sets the Cause field's value. +func (s *StartSyncExecutionOutput) SetCause(v string) *StartSyncExecutionOutput { + s.Cause = &v + return s +} + +// SetError sets the Error field's value. +func (s *StartSyncExecutionOutput) SetError(v string) *StartSyncExecutionOutput { + s.Error = &v + return s +} + +// SetExecutionArn sets the ExecutionArn field's value. +func (s *StartSyncExecutionOutput) SetExecutionArn(v string) *StartSyncExecutionOutput { + s.ExecutionArn = &v + return s +} + +// SetInput sets the Input field's value. +func (s *StartSyncExecutionOutput) SetInput(v string) *StartSyncExecutionOutput { + s.Input = &v + return s +} + +// SetInputDetails sets the InputDetails field's value. +func (s *StartSyncExecutionOutput) SetInputDetails(v *CloudWatchEventsExecutionDataDetails) *StartSyncExecutionOutput { + s.InputDetails = v + return s +} + +// SetName sets the Name field's value. +func (s *StartSyncExecutionOutput) SetName(v string) *StartSyncExecutionOutput { + s.Name = &v + return s +} + +// SetOutput sets the Output field's value. +func (s *StartSyncExecutionOutput) SetOutput(v string) *StartSyncExecutionOutput { + s.Output = &v + return s +} + +// SetOutputDetails sets the OutputDetails field's value. +func (s *StartSyncExecutionOutput) SetOutputDetails(v *CloudWatchEventsExecutionDataDetails) *StartSyncExecutionOutput { + s.OutputDetails = v + return s +} + +// SetStartDate sets the StartDate field's value. 
+func (s *StartSyncExecutionOutput) SetStartDate(v time.Time) *StartSyncExecutionOutput { + s.StartDate = &v + return s +} + +// SetStateMachineArn sets the StateMachineArn field's value. +func (s *StartSyncExecutionOutput) SetStateMachineArn(v string) *StartSyncExecutionOutput { + s.StateMachineArn = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *StartSyncExecutionOutput) SetStatus(v string) *StartSyncExecutionOutput { + s.Status = &v + return s +} + +// SetStopDate sets the StopDate field's value. +func (s *StartSyncExecutionOutput) SetStopDate(v time.Time) *StartSyncExecutionOutput { + s.StopDate = &v + return s +} + +// SetTraceHeader sets the TraceHeader field's value. +func (s *StartSyncExecutionOutput) SetTraceHeader(v string) *StartSyncExecutionOutput { + s.TraceHeader = &v + return s +} + // Contains details about a state entered during an execution. type StateEnteredEventDetails struct { _ struct{} `type:"structure"` @@ -8311,3 +8673,23 @@ func StateMachineType_Values() []string { StateMachineTypeExpress, } } + +const ( + // SyncExecutionStatusSucceeded is a SyncExecutionStatus enum value + SyncExecutionStatusSucceeded = "SUCCEEDED" + + // SyncExecutionStatusFailed is a SyncExecutionStatus enum value + SyncExecutionStatusFailed = "FAILED" + + // SyncExecutionStatusTimedOut is a SyncExecutionStatus enum value + SyncExecutionStatusTimedOut = "TIMED_OUT" +) + +// SyncExecutionStatus_Values returns all elements of the SyncExecutionStatus enum +func SyncExecutionStatus_Values() []string { + return []string{ + SyncExecutionStatusSucceeded, + SyncExecutionStatusFailed, + SyncExecutionStatusTimedOut, + } +} diff --git a/service/sfn/sfniface/interface.go b/service/sfn/sfniface/interface.go index 198560854cf..ede61bab386 100644 --- a/service/sfn/sfniface/interface.go +++ b/service/sfn/sfniface/interface.go @@ -144,6 +144,10 @@ type SFNAPI interface { StartExecutionWithContext(aws.Context, *sfn.StartExecutionInput, ...request.Option) 
(*sfn.StartExecutionOutput, error) StartExecutionRequest(*sfn.StartExecutionInput) (*request.Request, *sfn.StartExecutionOutput) + StartSyncExecution(*sfn.StartSyncExecutionInput) (*sfn.StartSyncExecutionOutput, error) + StartSyncExecutionWithContext(aws.Context, *sfn.StartSyncExecutionInput, ...request.Option) (*sfn.StartSyncExecutionOutput, error) + StartSyncExecutionRequest(*sfn.StartSyncExecutionInput) (*request.Request, *sfn.StartSyncExecutionOutput) + StopExecution(*sfn.StopExecutionInput) (*sfn.StopExecutionOutput, error) StopExecutionWithContext(aws.Context, *sfn.StopExecutionInput, ...request.Option) (*sfn.StopExecutionOutput, error) StopExecutionRequest(*sfn.StopExecutionInput) (*request.Request, *sfn.StopExecutionOutput) diff --git a/service/timestreamwrite/api.go b/service/timestreamwrite/api.go index 311cf39c0bc..0c508cd6edd 100644 --- a/service/timestreamwrite/api.go +++ b/service/timestreamwrite/api.go @@ -352,6 +352,9 @@ func (c *TimestreamWrite) DeleteDatabaseRequest(input *DeleteDatabaseInput) (req // All tables in the database must be deleted first, or a ValidationException // error will be thrown. // +// Due to the nature of distributed retries, the operation can return either +// success or a ResourceNotFoundException. Clients should consider them equivalent. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -475,6 +478,9 @@ func (c *TimestreamWrite) DeleteTableRequest(input *DeleteTableInput) (req *requ // a Timestream database table is deleted, the time series data stored in the // table cannot be recovered. // +// Due to the nature of distributed retries, the operation can return either +// success or a ResourceNotFoundException. Clients should consider them equivalent. +// // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1381,6 +1387,10 @@ func (c *TimestreamWrite) ListTagsForResourceRequest(input *ListTagsForResourceI // The operation tried to access a nonexistent resource. The resource might // not be specified correctly, or its status might not be ACTIVE. // +// * ThrottlingException +// Too many requests were made by a user exceeding service quotas. The request +// was throttled. +// // * ValidationException // Invalid or malformed request. // @@ -1496,6 +1506,10 @@ func (c *TimestreamWrite) TagResourceRequest(input *TagResourceInput) (req *requ // * ServiceQuotaExceededException // Instance quota of resource exceeded for this account. // +// * ThrottlingException +// Too many requests were made by a user exceeding service quotas. The request +// was throttled. +// // * ValidationException // Invalid or malformed request. // @@ -1608,6 +1622,10 @@ func (c *TimestreamWrite) UntagResourceRequest(input *UntagResourceInput) (req * // * ServiceQuotaExceededException // Instance quota of resource exceeded for this account. // +// * ThrottlingException +// Too many requests were made by a user exceeding service quotas. The request +// was throttled. +// // * ResourceNotFoundException // The operation tried to access a nonexistent resource. The resource might // not be specified correctly, or its status might not be ACTIVE. @@ -2759,8 +2777,9 @@ type Dimension struct { // Dimension represents the meta data attributes of the time series. For example, // the name and availability zone of an EC2 instance or the name of the manufacturer - // of a wind turbine are dimensions. Dimension names can only contain alphanumeric - // characters and underscores. Dimension names cannot end with an underscore. + // of a wind turbine are dimensions. 
+ // + // For constraints on Dimension names, see Naming Constraints (https://docs.aws.amazon.com/timestream/latest/developerguide/ts-limits.html#limits.naming). // // Name is a required field Name *string `min:"1" type:"string" required:"true"` @@ -3238,11 +3257,20 @@ type Record struct { MeasureValueType *string `type:"string" enum:"MeasureValueType"` // Contains the time at which the measure value for the data point was collected. + // The time value plus the unit provides the time elapsed since the epoch. For + // example, if the time value is 12345 and the unit is ms, then 12345 ms have + // elapsed since the epoch. Time *string `min:"1" type:"string"` // The granularity of the timestamp unit. It indicates if the time value is // in seconds, milliseconds, nanoseconds or other supported values. TimeUnit *string `type:"string" enum:"TimeUnit"` + + // 64-bit attribute used for record updates. Write requests for duplicate data + // with a higher version number will update the existing measure value and version. + // In cases where the measure value is the same, Version will still be updated + // . Default value is to 1. + Version *int64 `type:"long"` } // String returns the string representation @@ -3320,12 +3348,23 @@ func (s *Record) SetTimeUnit(v string) *Record { return s } +// SetVersion sets the Version field's value. +func (s *Record) SetVersion(v int64) *Record { + s.Version = &v + return s +} + // Records that were not successfully inserted into Timestream due to data validation // issues that must be resolved prior to reinserting time series data into the // system. type RejectedRecord struct { _ struct{} `type:"structure"` + // The existing version of the record. This value is populated in scenarios + // where an identical record exists with a higher version than the version in + // the write request. + ExistingVersion *int64 `type:"long"` + // The reason why a record was not successfully inserted into Timestream. 
Possible // causes of failure include: // @@ -3333,7 +3372,12 @@ type RejectedRecord struct { // same dimensions, timestamps, and measure names but different measure values. // // * Records with timestamps that lie outside the retention duration of the - // memory store + // memory store When the retention window is updated, you will receive a + // RejectedRecords exception if you immediately try to ingest data within + // the new window. To avoid a RejectedRecords exception, wait until the duration + // of the new window to ingest new data. For further information, see Best + // Practices for Configuring Timestream (https://docs.aws.amazon.com/timestream/latest/developerguide/best-practices.html#configuration) + // and the explanation of how storage works in Timestream (https://docs.aws.amazon.com/timestream/latest/developerguide/storage.html). // // * Records with dimensions or measures that exceed the Timestream defined // limits. @@ -3357,6 +3401,12 @@ func (s RejectedRecord) GoString() string { return s.String() } +// SetExistingVersion sets the ExistingVersion field's value. +func (s *RejectedRecord) SetExistingVersion(v int64) *RejectedRecord { + s.ExistingVersion = &v + return s +} + // SetReason sets the Reason field's value. func (s *RejectedRecord) SetReason(v string) *RejectedRecord { s.Reason = &v diff --git a/service/transcribestreamingservice/api.go b/service/transcribestreamingservice/api.go index fa983177982..f439fb83d5a 100644 --- a/service/transcribestreamingservice/api.go +++ b/service/transcribestreamingservice/api.go @@ -22,50 +22,50 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/restjson" ) -const opStartStreamTranscription = "StartStreamTranscription" +const opStartMedicalStreamTranscription = "StartMedicalStreamTranscription" -// StartStreamTranscriptionRequest generates a "aws/request.Request" representing the -// client's request for the StartStreamTranscription operation. 
The "output" return +// StartMedicalStreamTranscriptionRequest generates a "aws/request.Request" representing the +// client's request for the StartMedicalStreamTranscription operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See StartStreamTranscription for more information on using the StartStreamTranscription +// See StartMedicalStreamTranscription for more information on using the StartMedicalStreamTranscription // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the StartStreamTranscriptionRequest method. -// req, resp := client.StartStreamTranscriptionRequest(params) +// // Example sending a request using the StartMedicalStreamTranscriptionRequest method. 
+// req, resp := client.StartMedicalStreamTranscriptionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/StartStreamTranscription -func (c *TranscribeStreamingService) StartStreamTranscriptionRequest(input *StartStreamTranscriptionInput) (req *request.Request, output *StartStreamTranscriptionOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/StartMedicalStreamTranscription +func (c *TranscribeStreamingService) StartMedicalStreamTranscriptionRequest(input *StartMedicalStreamTranscriptionInput) (req *request.Request, output *StartMedicalStreamTranscriptionOutput) { op := &request.Operation{ - Name: opStartStreamTranscription, + Name: opStartMedicalStreamTranscription, HTTPMethod: "POST", - HTTPPath: "/stream-transcription", + HTTPPath: "/medical-stream-transcription", } if input == nil { - input = &StartStreamTranscriptionInput{} + input = &StartMedicalStreamTranscriptionInput{} } - output = &StartStreamTranscriptionOutput{} + output = &StartMedicalStreamTranscriptionOutput{} req = c.newRequest(op, input, output) req.Handlers.UnmarshalMeta.PushBack( protocol.RequireHTTPMinProtocol{Major: 2}.Handler, ) - es := NewStartStreamTranscriptionEventStream() + es := NewStartMedicalStreamTranscriptionEventStream() output.eventStream = es req.Handlers.Sign.PushFront(es.setupInputPipe) @@ -84,33 +84,24 @@ func (c *TranscribeStreamingService) StartStreamTranscriptionRequest(input *Star return } -// StartStreamTranscription API operation for Amazon Transcribe Streaming Service. -// -// Starts a bidirectional HTTP2 stream where audio is streamed to Amazon Transcribe -// and the transcription results are streamed to your application. 
-// -// The following are encoded as HTTP2 headers: -// -// * x-amzn-transcribe-language-code -// -// * x-amzn-transcribe-media-encoding -// -// * x-amzn-transcribe-sample-rate +// StartMedicalStreamTranscription API operation for Amazon Transcribe Streaming Service. // -// * x-amzn-transcribe-session-id +// Starts a bidirectional HTTP/2 stream where audio is streamed to Amazon Transcribe +// Medical and the transcription results are streamed to your application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Transcribe Streaming Service's -// API operation StartStreamTranscription for usage and error information. +// API operation StartMedicalStreamTranscription for usage and error information. // // Returned Error Types: // * BadRequestException -// One or more arguments to the StartStreamTranscription operation was invalid. -// For example, MediaEncoding was not set to pcm or LanguageCode was not set -// to a valid code. Check the parameters and try your request again. +// One or more arguments to the StartStreamTranscription or StartMedicalStreamTranscription +// operation was invalid. For example, MediaEncoding was not set to a valid +// encoding, or LanguageCode was not set to a valid code. Check the parameters +// and try your request again. // // * LimitExceededException // You have exceeded the maximum number of concurrent transcription streams, @@ -119,8 +110,8 @@ func (c *TranscribeStreamingService) StartStreamTranscriptionRequest(input *Star // stream into smaller chunks and try your request again. // // * InternalFailureException -// A problem occurred while processing the audio. Amazon Transcribe terminated -// processing. Try your request again. +// A problem occurred while processing the audio. Amazon Transcribe or Amazon +// Transcribe Medical terminated processing. 
Try your request again. // // * ConflictException // A new stream started with the same session ID. The current stream has been @@ -129,23 +120,23 @@ func (c *TranscribeStreamingService) StartStreamTranscriptionRequest(input *Star // * ServiceUnavailableException // Service is currently unavailable. Try your request later. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/StartStreamTranscription -func (c *TranscribeStreamingService) StartStreamTranscription(input *StartStreamTranscriptionInput) (*StartStreamTranscriptionOutput, error) { - req, out := c.StartStreamTranscriptionRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/StartMedicalStreamTranscription +func (c *TranscribeStreamingService) StartMedicalStreamTranscription(input *StartMedicalStreamTranscriptionInput) (*StartMedicalStreamTranscriptionOutput, error) { + req, out := c.StartMedicalStreamTranscriptionRequest(input) return out, req.Send() } -// StartStreamTranscriptionWithContext is the same as StartStreamTranscription with the addition of +// StartMedicalStreamTranscriptionWithContext is the same as StartMedicalStreamTranscription with the addition of // the ability to pass a context and additional request options. // -// See StartStreamTranscription for details on how to use this API operation. +// See StartMedicalStreamTranscription for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *TranscribeStreamingService) StartStreamTranscriptionWithContext(ctx aws.Context, input *StartStreamTranscriptionInput, opts ...request.Option) (*StartStreamTranscriptionOutput, error) { - req, out := c.StartStreamTranscriptionRequest(input) +func (c *TranscribeStreamingService) StartMedicalStreamTranscriptionWithContext(ctx aws.Context, input *StartMedicalStreamTranscriptionInput, opts ...request.Option) (*StartMedicalStreamTranscriptionOutput, error) { + req, out := c.StartMedicalStreamTranscriptionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() @@ -153,12 +144,12 @@ func (c *TranscribeStreamingService) StartStreamTranscriptionWithContext(ctx aws var _ awserr.Error -// StartStreamTranscriptionEventStream provides the event stream handling for the StartStreamTranscription. +// StartMedicalStreamTranscriptionEventStream provides the event stream handling for the StartMedicalStreamTranscription. // // For testing and mocking the event stream this type should be initialized via -// the NewStartStreamTranscriptionEventStream constructor function. Using the functional options +// the NewStartMedicalStreamTranscriptionEventStream constructor function. Using the functional options // to pass in nested mock behavior. -type StartStreamTranscriptionEventStream struct { +type StartMedicalStreamTranscriptionEventStream struct { // Writer is the EventStream writer for the AudioStream // events. This value is automatically set by the SDK when the API call is made @@ -170,13 +161,13 @@ type StartStreamTranscriptionEventStream struct { inputWriter io.WriteCloser - // Reader is the EventStream reader for the TranscriptResultStream + // Reader is the EventStream reader for the MedicalTranscriptResultStream // events. This value is automatically set by the SDK when the API call is made // Use this member when unit testing your code with the SDK to mock out the // EventStream Reader. // // Must not be nil. 
- Reader TranscriptResultStreamReader + Reader MedicalTranscriptResultStreamReader outputReader io.ReadCloser @@ -185,20 +176,20 @@ type StartStreamTranscriptionEventStream struct { err *eventstreamapi.OnceError } -// NewStartStreamTranscriptionEventStream initializes an StartStreamTranscriptionEventStream. -// This function should only be used for testing and mocking the StartStreamTranscriptionEventStream +// NewStartMedicalStreamTranscriptionEventStream initializes an StartMedicalStreamTranscriptionEventStream. +// This function should only be used for testing and mocking the StartMedicalStreamTranscriptionEventStream // stream within your application. // // The Writer member must be set before writing events to the stream. // // The Reader member must be set before reading events from the stream. // -// es := NewStartStreamTranscriptionEventStream(func(o *StartStreamTranscriptionEventStream{ +// es := NewStartMedicalStreamTranscriptionEventStream(func(o *StartMedicalStreamTranscriptionEventStream{ // es.Writer = myMockStreamWriter // es.Reader = myMockStreamReader // }) -func NewStartStreamTranscriptionEventStream(opts ...func(*StartStreamTranscriptionEventStream)) *StartStreamTranscriptionEventStream { - es := &StartStreamTranscriptionEventStream{ +func NewStartMedicalStreamTranscriptionEventStream(opts ...func(*StartMedicalStreamTranscriptionEventStream)) *StartMedicalStreamTranscriptionEventStream { + es := &StartMedicalStreamTranscriptionEventStream{ done: make(chan struct{}), err: eventstreamapi.NewOnceError(), } @@ -210,7 +201,7 @@ func NewStartStreamTranscriptionEventStream(opts ...func(*StartStreamTranscripti return es } -func (es *StartStreamTranscriptionEventStream) runOnStreamPartClose(r *request.Request) { +func (es *StartMedicalStreamTranscriptionEventStream) runOnStreamPartClose(r *request.Request) { if es.done == nil { return } @@ -218,7 +209,7 @@ func (es *StartStreamTranscriptionEventStream) runOnStreamPartClose(r *request.R } -func (es 
*StartStreamTranscriptionEventStream) waitStreamPartClose() { +func (es *StartMedicalStreamTranscriptionEventStream) waitStreamPartClose() { var inputErrCh <-chan struct{} if v, ok := es.Writer.(interface{ ErrorSet() <-chan struct{} }); ok { inputErrCh = v.ErrorSet() @@ -248,7 +239,7 @@ func (es *StartStreamTranscriptionEventStream) waitStreamPartClose() { } } -func (es *StartStreamTranscriptionEventStream) setupInputPipe(r *request.Request) { +func (es *StartMedicalStreamTranscriptionEventStream) setupInputPipe(r *request.Request) { inputReader, inputWriter := io.Pipe() r.SetStreamingBody(inputReader) es.inputWriter = inputWriter @@ -260,11 +251,11 @@ func (es *StartStreamTranscriptionEventStream) setupInputPipe(r *request.Request // These events are: // // * AudioEvent -func (es *StartStreamTranscriptionEventStream) Send(ctx aws.Context, event AudioStreamEvent) error { +func (es *StartMedicalStreamTranscriptionEventStream) Send(ctx aws.Context, event AudioStreamEvent) error { return es.Writer.Send(ctx, event) } -func (es *StartStreamTranscriptionEventStream) runInputStream(r *request.Request) { +func (es *StartMedicalStreamTranscriptionEventStream) runInputStream(r *request.Request) { var opts []func(*eventstream.Encoder) if r.Config.Logger != nil && r.Config.LogLevel.Matches(aws.LogDebugWithEventStreamBody) { opts = append(opts, eventstream.EncodeWithLogger(r.Config.Logger)) @@ -303,19 +294,19 @@ func (es *StartStreamTranscriptionEventStream) runInputStream(r *request.Request // // These events are: // -// * TranscriptEvent -// * TranscriptResultStreamUnknownEvent -func (es *StartStreamTranscriptionEventStream) Events() <-chan TranscriptResultStreamEvent { +// * MedicalTranscriptEvent +// * MedicalTranscriptResultStreamUnknownEvent +func (es *StartMedicalStreamTranscriptionEventStream) Events() <-chan MedicalTranscriptResultStreamEvent { return es.Reader.Events() } -func (es *StartStreamTranscriptionEventStream) runOutputStream(r *request.Request) { +func (es 
*StartMedicalStreamTranscriptionEventStream) runOutputStream(r *request.Request) { var opts []func(*eventstream.Decoder) if r.Config.Logger != nil && r.Config.LogLevel.Matches(aws.LogDebugWithEventStreamBody) { opts = append(opts, eventstream.DecodeWithLogger(r.Config.Logger)) } - unmarshalerForEvent := unmarshalerForTranscriptResultStreamEvent{ + unmarshalerForEvent := unmarshalerForMedicalTranscriptResultStreamEvent{ metadata: protocol.ResponseMetadata{ StatusCode: r.HTTPResponse.StatusCode, RequestID: r.RequestID, @@ -331,7 +322,7 @@ func (es *StartStreamTranscriptionEventStream) runOutputStream(r *request.Reques ) es.outputReader = r.HTTPResponse.Body - es.Reader = newReadTranscriptResultStream(eventReader) + es.Reader = newReadMedicalTranscriptResultStream(eventReader) } // Close closes the stream. This will also cause the stream to be closed. @@ -344,12 +335,12 @@ func (es *StartStreamTranscriptionEventStream) runOutputStream(r *request.Reques // You can use the closing of the Reader's Events channel to terminate your // application's read from the API's stream. // -func (es *StartStreamTranscriptionEventStream) Close() (err error) { +func (es *StartMedicalStreamTranscriptionEventStream) Close() (err error) { es.closeOnce.Do(es.safeClose) return es.Err() } -func (es *StartStreamTranscriptionEventStream) safeClose() { +func (es *StartMedicalStreamTranscriptionEventStream) safeClose() { if es.done != nil { close(es.done) } @@ -379,7 +370,7 @@ func (es *StartStreamTranscriptionEventStream) safeClose() { // Err returns any error that occurred while reading or writing EventStream // Events from the service API's response. Returns nil if there were no errors. 
-func (es *StartStreamTranscriptionEventStream) Err() error { +func (es *StartMedicalStreamTranscriptionEventStream) Err() error { if err := es.err.Err(); err != nil { return err } @@ -393,82 +384,454 @@ func (es *StartStreamTranscriptionEventStream) Err() error { return nil } -// A list of possible transcriptions for the audio. -type Alternative struct { - _ struct{} `type:"structure"` +const opStartStreamTranscription = "StartStreamTranscription" - // One or more alternative interpretations of the input audio. - Items []*Item `type:"list"` +// StartStreamTranscriptionRequest generates a "aws/request.Request" representing the +// client's request for the StartStreamTranscription operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartStreamTranscription for more information on using the StartStreamTranscription +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartStreamTranscriptionRequest method. +// req, resp := client.StartStreamTranscriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/StartStreamTranscription +func (c *TranscribeStreamingService) StartStreamTranscriptionRequest(input *StartStreamTranscriptionInput) (req *request.Request, output *StartStreamTranscriptionOutput) { + op := &request.Operation{ + Name: opStartStreamTranscription, + HTTPMethod: "POST", + HTTPPath: "/stream-transcription", + } - // The text that was transcribed from the audio. 
- Transcript *string `type:"string"` -} + if input == nil { + input = &StartStreamTranscriptionInput{} + } -// String returns the string representation -func (s Alternative) String() string { - return awsutil.Prettify(s) -} + output = &StartStreamTranscriptionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.UnmarshalMeta.PushBack( + protocol.RequireHTTPMinProtocol{Major: 2}.Handler, + ) -// GoString returns the string representation -func (s Alternative) GoString() string { - return s.String() + es := NewStartStreamTranscriptionEventStream() + output.eventStream = es + + req.Handlers.Sign.PushFront(es.setupInputPipe) + req.Handlers.Build.PushBack(request.WithSetRequestHeaders(map[string]string{ + "Content-Type": "application/vnd.amazon.eventstream", + "X-Amz-Content-Sha256": "STREAMING-AWS4-HMAC-SHA256-EVENTS", + })) + req.Handlers.Build.Swap(restjson.BuildHandler.Name, rest.BuildHandler) + req.Handlers.Send.Swap(client.LogHTTPRequestHandler.Name, client.LogHTTPRequestHeaderHandler) + req.Handlers.Unmarshal.PushBack(es.runInputStream) + + req.Handlers.Send.Swap(client.LogHTTPResponseHandler.Name, client.LogHTTPResponseHeaderHandler) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, rest.UnmarshalHandler) + req.Handlers.Unmarshal.PushBack(es.runOutputStream) + req.Handlers.Unmarshal.PushBack(es.runOnStreamPartClose) + return } -// SetItems sets the Items field's value. -func (s *Alternative) SetItems(v []*Item) *Alternative { - s.Items = v - return s +// StartStreamTranscription API operation for Amazon Transcribe Streaming Service. +// +// Starts a bidirectional HTTP2 stream where audio is streamed to Amazon Transcribe +// and the transcription results are streamed to your application. 
+// +// The following are encoded as HTTP2 headers: +// +// * x-amzn-transcribe-language-code +// +// * x-amzn-transcribe-media-encoding +// +// * x-amzn-transcribe-sample-rate +// +// * x-amzn-transcribe-session-id +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Transcribe Streaming Service's +// API operation StartStreamTranscription for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// One or more arguments to the StartStreamTranscription or StartMedicalStreamTranscription +// operation was invalid. For example, MediaEncoding was not set to a valid +// encoding, or LanguageCode was not set to a valid code. Check the parameters +// and try your request again. +// +// * LimitExceededException +// You have exceeded the maximum number of concurrent transcription streams, +// are starting transcription streams too quickly, or the maximum audio length +// of 4 hours. Wait until a stream has finished processing, or break your audio +// stream into smaller chunks and try your request again. +// +// * InternalFailureException +// A problem occurred while processing the audio. Amazon Transcribe or Amazon +// Transcribe Medical terminated processing. Try your request again. +// +// * ConflictException +// A new stream started with the same session ID. The current stream has been +// terminated. +// +// * ServiceUnavailableException +// Service is currently unavailable. Try your request later. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/transcribe-streaming-2017-10-26/StartStreamTranscription +func (c *TranscribeStreamingService) StartStreamTranscription(input *StartStreamTranscriptionInput) (*StartStreamTranscriptionOutput, error) { + req, out := c.StartStreamTranscriptionRequest(input) + return out, req.Send() } -// SetTranscript sets the Transcript field's value. -func (s *Alternative) SetTranscript(v string) *Alternative { - s.Transcript = &v - return s +// StartStreamTranscriptionWithContext is the same as StartStreamTranscription with the addition of +// the ability to pass a context and additional request options. +// +// See StartStreamTranscription for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *TranscribeStreamingService) StartStreamTranscriptionWithContext(ctx aws.Context, input *StartStreamTranscriptionInput, opts ...request.Option) (*StartStreamTranscriptionOutput, error) { + req, out := c.StartStreamTranscriptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } -// Provides a wrapper for the audio chunks that you are sending. -type AudioEvent struct { - _ struct{} `type:"structure" payload:"AudioChunk"` +var _ awserr.Error - // An audio blob that contains the next part of the audio that you want to transcribe. - // - // AudioChunk is automatically base64 encoded/decoded by the SDK. - AudioChunk []byte `type:"blob"` -} +// StartStreamTranscriptionEventStream provides the event stream handling for the StartStreamTranscription. +// +// For testing and mocking the event stream this type should be initialized via +// the NewStartStreamTranscriptionEventStream constructor function. 
Using the functional options +// to pass in nested mock behavior. +type StartStreamTranscriptionEventStream struct { -// String returns the string representation -func (s AudioEvent) String() string { - return awsutil.Prettify(s) -} + // Writer is the EventStream writer for the AudioStream + // events. This value is automatically set by the SDK when the API call is made + // Use this member when unit testing your code with the SDK to mock out the + // EventStream Writer. + // + // Must not be nil. + Writer AudioStreamWriter -// GoString returns the string representation -func (s AudioEvent) GoString() string { - return s.String() -} + inputWriter io.WriteCloser -// SetAudioChunk sets the AudioChunk field's value. -func (s *AudioEvent) SetAudioChunk(v []byte) *AudioEvent { - s.AudioChunk = v - return s -} + // Reader is the EventStream reader for the TranscriptResultStream + // events. This value is automatically set by the SDK when the API call is made + // Use this member when unit testing your code with the SDK to mock out the + // EventStream Reader. + // + // Must not be nil. + Reader TranscriptResultStreamReader -// The AudioEvent is and event in the AudioStream group of events. -func (s *AudioEvent) eventAudioStream() {} + outputReader io.ReadCloser -// UnmarshalEvent unmarshals the EventStream Message into the AudioEvent value. -// This method is only used internally within the SDK's EventStream handling. -func (s *AudioEvent) UnmarshalEvent( - payloadUnmarshaler protocol.PayloadUnmarshaler, - msg eventstream.Message, -) error { - s.AudioChunk = make([]byte, len(msg.Payload)) - copy(s.AudioChunk, msg.Payload) - return nil + done chan struct{} + closeOnce sync.Once + err *eventstreamapi.OnceError } -// MarshalEvent marshals the type into an stream event value. This method -// should only used internally within the SDK's EventStream handling. 
-func (s *AudioEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { +// NewStartStreamTranscriptionEventStream initializes an StartStreamTranscriptionEventStream. +// This function should only be used for testing and mocking the StartStreamTranscriptionEventStream +// stream within your application. +// +// The Writer member must be set before writing events to the stream. +// +// The Reader member must be set before reading events from the stream. +// +// es := NewStartStreamTranscriptionEventStream(func(o *StartStreamTranscriptionEventStream{ +// es.Writer = myMockStreamWriter +// es.Reader = myMockStreamReader +// }) +func NewStartStreamTranscriptionEventStream(opts ...func(*StartStreamTranscriptionEventStream)) *StartStreamTranscriptionEventStream { + es := &StartStreamTranscriptionEventStream{ + done: make(chan struct{}), + err: eventstreamapi.NewOnceError(), + } + + for _, fn := range opts { + fn(es) + } + + return es +} + +func (es *StartStreamTranscriptionEventStream) runOnStreamPartClose(r *request.Request) { + if es.done == nil { + return + } + go es.waitStreamPartClose() + +} + +func (es *StartStreamTranscriptionEventStream) waitStreamPartClose() { + var inputErrCh <-chan struct{} + if v, ok := es.Writer.(interface{ ErrorSet() <-chan struct{} }); ok { + inputErrCh = v.ErrorSet() + } + var outputErrCh <-chan struct{} + if v, ok := es.Reader.(interface{ ErrorSet() <-chan struct{} }); ok { + outputErrCh = v.ErrorSet() + } + var outputClosedCh <-chan struct{} + if v, ok := es.Reader.(interface{ Closed() <-chan struct{} }); ok { + outputClosedCh = v.Closed() + } + + select { + case <-es.done: + case <-inputErrCh: + es.err.SetError(es.Writer.Err()) + es.Close() + case <-outputErrCh: + es.err.SetError(es.Reader.Err()) + es.Close() + case <-outputClosedCh: + if err := es.Reader.Err(); err != nil { + es.err.SetError(es.Reader.Err()) + } + es.Close() + } +} + +func (es *StartStreamTranscriptionEventStream) setupInputPipe(r 
*request.Request) { + inputReader, inputWriter := io.Pipe() + r.SetStreamingBody(inputReader) + es.inputWriter = inputWriter +} + +// Send writes the event to the stream blocking until the event is written. +// Returns an error if the event was not written. +// +// These events are: +// +// * AudioEvent +func (es *StartStreamTranscriptionEventStream) Send(ctx aws.Context, event AudioStreamEvent) error { + return es.Writer.Send(ctx, event) +} + +func (es *StartStreamTranscriptionEventStream) runInputStream(r *request.Request) { + var opts []func(*eventstream.Encoder) + if r.Config.Logger != nil && r.Config.LogLevel.Matches(aws.LogDebugWithEventStreamBody) { + opts = append(opts, eventstream.EncodeWithLogger(r.Config.Logger)) + } + var encoder eventstreamapi.Encoder = eventstream.NewEncoder(es.inputWriter, opts...) + + var closer aws.MultiCloser + sigSeed, err := v4.GetSignedRequestSignature(r.HTTPRequest) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, + "unable to get initial request's signature", err) + return + } + signer := eventstreamapi.NewSignEncoder( + v4.NewStreamSigner(r.ClientInfo.SigningRegion, r.ClientInfo.SigningName, + sigSeed, r.Config.Credentials), + encoder, + ) + encoder = signer + closer = append(closer, signer) + closer = append(closer, es.inputWriter) + + eventWriter := eventstreamapi.NewEventWriter(encoder, + protocol.HandlerPayloadMarshal{ + Marshalers: r.Handlers.BuildStream, + }, + eventTypeForAudioStreamEvent, + ) + + es.Writer = &writeAudioStream{ + StreamWriter: eventstreamapi.NewStreamWriter(eventWriter, closer), + } +} + +// Events returns a channel to read events from. 
+// +// These events are: +// +// * TranscriptEvent +// * TranscriptResultStreamUnknownEvent +func (es *StartStreamTranscriptionEventStream) Events() <-chan TranscriptResultStreamEvent { + return es.Reader.Events() +} + +func (es *StartStreamTranscriptionEventStream) runOutputStream(r *request.Request) { + var opts []func(*eventstream.Decoder) + if r.Config.Logger != nil && r.Config.LogLevel.Matches(aws.LogDebugWithEventStreamBody) { + opts = append(opts, eventstream.DecodeWithLogger(r.Config.Logger)) + } + + unmarshalerForEvent := unmarshalerForTranscriptResultStreamEvent{ + metadata: protocol.ResponseMetadata{ + StatusCode: r.HTTPResponse.StatusCode, + RequestID: r.RequestID, + }, + }.UnmarshalerForEventName + + decoder := eventstream.NewDecoder(r.HTTPResponse.Body, opts...) + eventReader := eventstreamapi.NewEventReader(decoder, + protocol.HandlerPayloadUnmarshal{ + Unmarshalers: r.Handlers.UnmarshalStream, + }, + unmarshalerForEvent, + ) + + es.outputReader = r.HTTPResponse.Body + es.Reader = newReadTranscriptResultStream(eventReader) +} + +// Close closes the stream. This will also cause the stream to be closed. +// Close must be called when done using the stream API. Not calling Close +// may result in resource leaks. +// +// Will close the underlying EventStream writer, and no more events can be +// sent. +// +// You can use the closing of the Reader's Events channel to terminate your +// application's read from the API's stream. 
+// +func (es *StartStreamTranscriptionEventStream) Close() (err error) { + es.closeOnce.Do(es.safeClose) + return es.Err() +} + +func (es *StartStreamTranscriptionEventStream) safeClose() { + if es.done != nil { + close(es.done) + } + + t := time.NewTicker(time.Second) + defer t.Stop() + writeCloseDone := make(chan error) + go func() { + if err := es.Writer.Close(); err != nil { + es.err.SetError(err) + } + close(writeCloseDone) + }() + select { + case <-t.C: + case <-writeCloseDone: + } + if es.inputWriter != nil { + es.inputWriter.Close() + } + + es.Reader.Close() + if es.outputReader != nil { + es.outputReader.Close() + } +} + +// Err returns any error that occurred while reading or writing EventStream +// Events from the service API's response. Returns nil if there were no errors. +func (es *StartStreamTranscriptionEventStream) Err() error { + if err := es.err.Err(); err != nil { + return err + } + if err := es.Writer.Err(); err != nil { + return err + } + if err := es.Reader.Err(); err != nil { + return err + } + + return nil +} + +// A list of possible transcriptions for the audio. +type Alternative struct { + _ struct{} `type:"structure"` + + // One or more alternative interpretations of the input audio. + Items []*Item `type:"list"` + + // The text that was transcribed from the audio. + Transcript *string `type:"string"` +} + +// String returns the string representation +func (s Alternative) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Alternative) GoString() string { + return s.String() +} + +// SetItems sets the Items field's value. +func (s *Alternative) SetItems(v []*Item) *Alternative { + s.Items = v + return s +} + +// SetTranscript sets the Transcript field's value. +func (s *Alternative) SetTranscript(v string) *Alternative { + s.Transcript = &v + return s +} + +// Provides a wrapper for the audio chunks that you are sending. 
+type AudioEvent struct { + _ struct{} `type:"structure" payload:"AudioChunk"` + + // An audio blob that contains the next part of the audio that you want to transcribe. + // + // AudioChunk is automatically base64 encoded/decoded by the SDK. + AudioChunk []byte `type:"blob"` +} + +// String returns the string representation +func (s AudioEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AudioEvent) GoString() string { + return s.String() +} + +// SetAudioChunk sets the AudioChunk field's value. +func (s *AudioEvent) SetAudioChunk(v []byte) *AudioEvent { + s.AudioChunk = v + return s +} + +// The AudioEvent is and event in the AudioStream group of events. +func (s *AudioEvent) eventAudioStream() {} + +// UnmarshalEvent unmarshals the EventStream Message into the AudioEvent value. +// This method is only used internally within the SDK's EventStream handling. +func (s *AudioEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + s.AudioChunk = make([]byte, len(msg.Payload)) + copy(s.AudioChunk, msg.Payload) + return nil +} + +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. +func (s *AudioEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) msg.Headers.Set(":content-type", eventstream.StringValue("application/octet-stream")) msg.Payload = s.AudioChunk @@ -528,9 +891,10 @@ func eventTypeForAudioStreamEvent(event eventstreamapi.Marshaler) (string, error } } -// One or more arguments to the StartStreamTranscription operation was invalid. -// For example, MediaEncoding was not set to pcm or LanguageCode was not set -// to a valid code. Check the parameters and try your request again. 
+// One or more arguments to the StartStreamTranscription or StartMedicalStreamTranscription +// operation was invalid. For example, MediaEncoding was not set to a valid +// encoding, or LanguageCode was not set to a valid code. Check the parameters +// and try your request again. type BadRequestException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -548,6 +912,9 @@ func (s BadRequestException) GoString() string { return s.String() } +// The BadRequestException is and event in the MedicalTranscriptResultStream group of events. +func (s *BadRequestException) eventMedicalTranscriptResultStream() {} + // The BadRequestException is and event in the TranscriptResultStream group of events. func (s *BadRequestException) eventTranscriptResultStream() {} @@ -634,6 +1001,9 @@ func (s ConflictException) GoString() string { return s.String() } +// The ConflictException is and event in the MedicalTranscriptResultStream group of events. +func (s *ConflictException) eventMedicalTranscriptResultStream() {} + // The ConflictException is and event in the TranscriptResultStream group of events. func (s *ConflictException) eventTranscriptResultStream() {} @@ -701,8 +1071,8 @@ func (s *ConflictException) RequestID() string { return s.RespMetadata.RequestID } -// A problem occurred while processing the audio. Amazon Transcribe terminated -// processing. Try your request again. +// A problem occurred while processing the audio. Amazon Transcribe or Amazon +// Transcribe Medical terminated processing. Try your request again. type InternalFailureException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -720,6 +1090,9 @@ func (s InternalFailureException) GoString() string { return s.String() } +// The InternalFailureException is and event in the MedicalTranscriptResultStream group of events. 
+func (s *InternalFailureException) eventMedicalTranscriptResultStream() {} + // The InternalFailureException is and event in the TranscriptResultStream group of events. func (s *InternalFailureException) eventTranscriptResultStream() {} @@ -782,114 +1155,431 @@ func (s *InternalFailureException) StatusCode() int { return s.RespMetadata.StatusCode } -// RequestID returns the service's response RequestID for request. -func (s *InternalFailureException) RequestID() string { - return s.RespMetadata.RequestID +// RequestID returns the service's response RequestID for request. +func (s *InternalFailureException) RequestID() string { + return s.RespMetadata.RequestID +} + +// A word or phrase transcribed from the input audio. +type Item struct { + _ struct{} `type:"structure"` + + // The word or punctuation that was recognized in the input audio. + Content *string `type:"string"` + + // The offset from the beginning of the audio stream to the end of the audio + // that resulted in the item. + EndTime *float64 `type:"double"` + + // If speaker identification is enabled, shows the speakers identified in the + // real-time stream. + Speaker *string `type:"string"` + + // The offset from the beginning of the audio stream to the beginning of the + // audio that resulted in the item. + StartTime *float64 `type:"double"` + + // The type of the item. PRONUNCIATION indicates that the item is a word that + // was recognized in the input audio. PUNCTUATION indicates that the item was + // interpreted as a pause in the input audio. + Type *string `type:"string" enum:"ItemType"` + + // Indicates whether a word in the item matches a word in the vocabulary filter + // you've chosen for your real-time stream. If true then a word in the item + // matches your vocabulary filter. 
+ VocabularyFilterMatch *bool `type:"boolean"` +} + +// String returns the string representation +func (s Item) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Item) GoString() string { + return s.String() +} + +// SetContent sets the Content field's value. +func (s *Item) SetContent(v string) *Item { + s.Content = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *Item) SetEndTime(v float64) *Item { + s.EndTime = &v + return s +} + +// SetSpeaker sets the Speaker field's value. +func (s *Item) SetSpeaker(v string) *Item { + s.Speaker = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *Item) SetStartTime(v float64) *Item { + s.StartTime = &v + return s +} + +// SetType sets the Type field's value. +func (s *Item) SetType(v string) *Item { + s.Type = &v + return s +} + +// SetVocabularyFilterMatch sets the VocabularyFilterMatch field's value. +func (s *Item) SetVocabularyFilterMatch(v bool) *Item { + s.VocabularyFilterMatch = &v + return s +} + +// You have exceeded the maximum number of concurrent transcription streams, +// are starting transcription streams too quickly, or the maximum audio length +// of 4 hours. Wait until a stream has finished processing, or break your audio +// stream into smaller chunks and try your request again. +type LimitExceededException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s LimitExceededException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LimitExceededException) GoString() string { + return s.String() +} + +// The LimitExceededException is and event in the MedicalTranscriptResultStream group of events. 
+func (s *LimitExceededException) eventMedicalTranscriptResultStream() {} + +// The LimitExceededException is and event in the TranscriptResultStream group of events. +func (s *LimitExceededException) eventTranscriptResultStream() {} + +// UnmarshalEvent unmarshals the EventStream Message into the LimitExceededException value. +// This method is only used internally within the SDK's EventStream handling. +func (s *LimitExceededException) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + if err := payloadUnmarshaler.UnmarshalPayload( + bytes.NewReader(msg.Payload), s, + ); err != nil { + return err + } + return nil +} + +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. +func (s *LimitExceededException) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.ExceptionMessageType)) + var buf bytes.Buffer + if err = pm.MarshalPayload(&buf, s); err != nil { + return eventstream.Message{}, err + } + msg.Payload = buf.Bytes() + return msg, err +} + +func newErrorLimitExceededException(v protocol.ResponseMetadata) error { + return &LimitExceededException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *LimitExceededException) Code() string { + return "LimitExceededException" +} + +// Message returns the exception's message. +func (s *LimitExceededException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *LimitExceededException) OrigErr() error { + return nil +} + +func (s *LimitExceededException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. 
+func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID +} + +// A list of possible transcriptions for the audio. +type MedicalAlternative struct { + _ struct{} `type:"structure"` + + // A list of objects that contains words and punctuation marks that represents + // one or more interpretations of the input audio. + Items []*MedicalItem `type:"list"` + + // The text that was transcribed from the audio. + Transcript *string `type:"string"` +} + +// String returns the string representation +func (s MedicalAlternative) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MedicalAlternative) GoString() string { + return s.String() +} + +// SetItems sets the Items field's value. +func (s *MedicalAlternative) SetItems(v []*MedicalItem) *MedicalAlternative { + s.Items = v + return s +} + +// SetTranscript sets the Transcript field's value. +func (s *MedicalAlternative) SetTranscript(v string) *MedicalAlternative { + s.Transcript = &v + return s } -// A word or phrase transcribed from the input audio. -type Item struct { +// A word or punctuation that is transcribed from the input audio. +type MedicalItem struct { _ struct{} `type:"structure"` - // The word or punctuation that was recognized in the input audio. + // A value between 0 and 1 for an item that is a confidence score that Amazon + // Transcribe Medical assigns to each word that it transcribes. + Confidence *float64 `type:"double"` + + // The word or punctuation mark that was recognized in the input audio. Content *string `type:"string"` - // The offset from the beginning of the audio stream to the end of the audio - // that resulted in the item. + // The number of seconds into an audio stream that indicates the creation time + // of an item. 
EndTime *float64 `type:"double"` - // If speaker identification is enabled, shows the speakers identified in the - // real-time stream. + // If speaker identification is enabled, shows the integer values that correspond + // to the different speakers identified in the stream. For example, if the value + // of Speaker in the stream is either a 0 or a 1, that indicates that Amazon + // Transcribe Medical has identified two speakers in the stream. The value of + // 0 corresponds to one speaker and the value of 1 corresponds to the other + // speaker. Speaker *string `type:"string"` - // The offset from the beginning of the audio stream to the beginning of the - // audio that resulted in the item. + // The number of seconds into an audio stream that indicates the creation time + // of an item. StartTime *float64 `type:"double"` // The type of the item. PRONUNCIATION indicates that the item is a word that // was recognized in the input audio. PUNCTUATION indicates that the item was - // interpreted as a pause in the input audio. + // interpreted as a pause in the input audio, such as a period to indicate the + // end of a sentence. Type *string `type:"string" enum:"ItemType"` - - // Indicates whether a word in the item matches a word in the vocabulary filter - // you've chosen for your real-time stream. If true then a word in the item - // matches your vocabulary filter. - VocabularyFilterMatch *bool `type:"boolean"` } // String returns the string representation -func (s Item) String() string { +func (s MedicalItem) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Item) GoString() string { +func (s MedicalItem) GoString() string { return s.String() } +// SetConfidence sets the Confidence field's value. +func (s *MedicalItem) SetConfidence(v float64) *MedicalItem { + s.Confidence = &v + return s +} + // SetContent sets the Content field's value. 
-func (s *Item) SetContent(v string) *Item { +func (s *MedicalItem) SetContent(v string) *MedicalItem { s.Content = &v return s } // SetEndTime sets the EndTime field's value. -func (s *Item) SetEndTime(v float64) *Item { +func (s *MedicalItem) SetEndTime(v float64) *MedicalItem { s.EndTime = &v return s } // SetSpeaker sets the Speaker field's value. -func (s *Item) SetSpeaker(v string) *Item { +func (s *MedicalItem) SetSpeaker(v string) *MedicalItem { s.Speaker = &v return s } // SetStartTime sets the StartTime field's value. -func (s *Item) SetStartTime(v float64) *Item { +func (s *MedicalItem) SetStartTime(v float64) *MedicalItem { s.StartTime = &v return s } // SetType sets the Type field's value. -func (s *Item) SetType(v string) *Item { +func (s *MedicalItem) SetType(v string) *MedicalItem { s.Type = &v return s } -// SetVocabularyFilterMatch sets the VocabularyFilterMatch field's value. -func (s *Item) SetVocabularyFilterMatch(v bool) *Item { - s.VocabularyFilterMatch = &v +// The results of transcribing a portion of the input audio stream. +type MedicalResult struct { + _ struct{} `type:"structure"` + + // A list of possible transcriptions of the audio. Each alternative typically + // contains one Item that contains the result of the transcription. + Alternatives []*MedicalAlternative `type:"list"` + + // When channel identification is enabled, Amazon Transcribe Medical transcribes + // the speech from each audio channel separately. + // + // You can use ChannelId to retrieve the transcription results for a single + // channel in your audio stream. + ChannelId *string `type:"string"` + + // The time, in seconds, from the beginning of the audio stream to the end of + // the result. + EndTime *float64 `type:"double"` + + // Amazon Transcribe Medical divides the incoming audio stream into segments + // at natural points in the audio. Transcription results are returned based + // on these segments. 
+ // + // The IsPartial field is true to indicate that Amazon Transcribe Medical has + // additional transcription data to send. The IsPartial field is false to indicate + // that this is the last transcription result for the segment. + IsPartial *bool `type:"boolean"` + + // A unique identifier for the result. + ResultId *string `type:"string"` + + // The time, in seconds, from the beginning of the audio stream to the beginning + // of the result. + StartTime *float64 `type:"double"` +} + +// String returns the string representation +func (s MedicalResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MedicalResult) GoString() string { + return s.String() +} + +// SetAlternatives sets the Alternatives field's value. +func (s *MedicalResult) SetAlternatives(v []*MedicalAlternative) *MedicalResult { + s.Alternatives = v return s } -// You have exceeded the maximum number of concurrent transcription streams, -// are starting transcription streams too quickly, or the maximum audio length -// of 4 hours. Wait until a stream has finished processing, or break your audio -// stream into smaller chunks and try your request again. -type LimitExceededException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` +// SetChannelId sets the ChannelId field's value. +func (s *MedicalResult) SetChannelId(v string) *MedicalResult { + s.ChannelId = &v + return s +} - Message_ *string `locationName:"Message" type:"string"` +// SetEndTime sets the EndTime field's value. +func (s *MedicalResult) SetEndTime(v float64) *MedicalResult { + s.EndTime = &v + return s +} + +// SetIsPartial sets the IsPartial field's value. +func (s *MedicalResult) SetIsPartial(v bool) *MedicalResult { + s.IsPartial = &v + return s +} + +// SetResultId sets the ResultId field's value. 
+func (s *MedicalResult) SetResultId(v string) *MedicalResult { + s.ResultId = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *MedicalResult) SetStartTime(v float64) *MedicalResult { + s.StartTime = &v + return s +} + +// The medical transcript in a MedicalTranscriptEvent. +type MedicalTranscript struct { + _ struct{} `type:"structure"` + + // MedicalResult objects that contain the results of transcribing a portion + // of the input audio stream. The array can be empty. + Results []*MedicalResult `type:"list"` } // String returns the string representation -func (s LimitExceededException) String() string { +func (s MedicalTranscript) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s LimitExceededException) GoString() string { +func (s MedicalTranscript) GoString() string { return s.String() } -// The LimitExceededException is and event in the TranscriptResultStream group of events. -func (s *LimitExceededException) eventTranscriptResultStream() {} +// SetResults sets the Results field's value. +func (s *MedicalTranscript) SetResults(v []*MedicalResult) *MedicalTranscript { + s.Results = v + return s +} -// UnmarshalEvent unmarshals the EventStream Message into the LimitExceededException value. +// Represents a set of transcription results from the server to the client. +// It contains one or more segments of the transcription. +type MedicalTranscriptEvent struct { + _ struct{} `type:"structure"` + + // The transcription of the audio stream. The transcription is composed of all + // of the items in the results list. + Transcript *MedicalTranscript `type:"structure"` +} + +// String returns the string representation +func (s MedicalTranscriptEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MedicalTranscriptEvent) GoString() string { + return s.String() +} + +// SetTranscript sets the Transcript field's value. 
+func (s *MedicalTranscriptEvent) SetTranscript(v *MedicalTranscript) *MedicalTranscriptEvent { + s.Transcript = v + return s +} + +// The MedicalTranscriptEvent is and event in the MedicalTranscriptResultStream group of events. +func (s *MedicalTranscriptEvent) eventMedicalTranscriptResultStream() {} + +// UnmarshalEvent unmarshals the EventStream Message into the MedicalTranscriptEvent value. // This method is only used internally within the SDK's EventStream handling. -func (s *LimitExceededException) UnmarshalEvent( +func (s *MedicalTranscriptEvent) UnmarshalEvent( payloadUnmarshaler protocol.PayloadUnmarshaler, msg eventstream.Message, ) error { @@ -903,8 +1593,8 @@ func (s *LimitExceededException) UnmarshalEvent( // MarshalEvent marshals the type into an stream event value. This method // should only used internally within the SDK's EventStream handling. -func (s *LimitExceededException) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { - msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.ExceptionMessageType)) +func (s *MedicalTranscriptEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) var buf bytes.Buffer if err = pm.MarshalPayload(&buf, s); err != nil { return eventstream.Message{}, err @@ -913,42 +1603,166 @@ func (s *LimitExceededException) MarshalEvent(pm protocol.PayloadMarshaler) (msg return msg, err } -func newErrorLimitExceededException(v protocol.ResponseMetadata) error { - return &LimitExceededException{ - RespMetadata: v, +// MedicalTranscriptResultStreamEvent groups together all EventStream +// events writes for MedicalTranscriptResultStream. 
+// +// These events are: +// +// * MedicalTranscriptEvent +type MedicalTranscriptResultStreamEvent interface { + eventMedicalTranscriptResultStream() + eventstreamapi.Marshaler + eventstreamapi.Unmarshaler +} + +// MedicalTranscriptResultStreamReader provides the interface for reading to the stream. The +// default implementation for this interface will be MedicalTranscriptResultStream. +// +// The reader's Close method must allow multiple concurrent calls. +// +// These events are: +// +// * MedicalTranscriptEvent +// * MedicalTranscriptResultStreamUnknownEvent +type MedicalTranscriptResultStreamReader interface { + // Returns a channel of events as they are read from the event stream. + Events() <-chan MedicalTranscriptResultStreamEvent + + // Close will stop the reader reading events from the stream. + Close() error + + // Returns any error that has occurred while reading from the event stream. + Err() error +} + +type readMedicalTranscriptResultStream struct { + eventReader *eventstreamapi.EventReader + stream chan MedicalTranscriptResultStreamEvent + err *eventstreamapi.OnceError + + done chan struct{} + closeOnce sync.Once +} + +func newReadMedicalTranscriptResultStream(eventReader *eventstreamapi.EventReader) *readMedicalTranscriptResultStream { + r := &readMedicalTranscriptResultStream{ + eventReader: eventReader, + stream: make(chan MedicalTranscriptResultStreamEvent), + done: make(chan struct{}), + err: eventstreamapi.NewOnceError(), } + go r.readEventStream() + + return r } -// Code returns the exception type name. -func (s *LimitExceededException) Code() string { - return "LimitExceededException" +// Close will close the underlying event stream reader. +func (r *readMedicalTranscriptResultStream) Close() error { + r.closeOnce.Do(r.safeClose) + return r.Err() } -// Message returns the exception's message. 
-func (s *LimitExceededException) Message() string { - if s.Message_ != nil { - return *s.Message_ +func (r *readMedicalTranscriptResultStream) ErrorSet() <-chan struct{} { + return r.err.ErrorSet() +} + +func (r *readMedicalTranscriptResultStream) Closed() <-chan struct{} { + return r.done +} + +func (r *readMedicalTranscriptResultStream) safeClose() { + close(r.done) +} + +func (r *readMedicalTranscriptResultStream) Err() error { + return r.err.Err() +} + +func (r *readMedicalTranscriptResultStream) Events() <-chan MedicalTranscriptResultStreamEvent { + return r.stream +} + +func (r *readMedicalTranscriptResultStream) readEventStream() { + defer r.Close() + defer close(r.stream) + + for { + event, err := r.eventReader.ReadEvent() + if err != nil { + if err == io.EOF { + return + } + select { + case <-r.done: + // If closed already ignore the error + return + default: + } + if _, ok := err.(*eventstreamapi.UnknownMessageTypeError); ok { + continue + } + r.err.SetError(err) + return + } + + select { + case r.stream <- event.(MedicalTranscriptResultStreamEvent): + case <-r.done: + return + } } - return "" } -// OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s *LimitExceededException) OrigErr() error { - return nil +type unmarshalerForMedicalTranscriptResultStreamEvent struct { + metadata protocol.ResponseMetadata } -func (s *LimitExceededException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +func (u unmarshalerForMedicalTranscriptResultStreamEvent) UnmarshalerForEventName(eventType string) (eventstreamapi.Unmarshaler, error) { + switch eventType { + case "TranscriptEvent": + return &MedicalTranscriptEvent{}, nil + case "BadRequestException": + return newErrorBadRequestException(u.metadata).(eventstreamapi.Unmarshaler), nil + case "ConflictException": + return newErrorConflictException(u.metadata).(eventstreamapi.Unmarshaler), nil + case "InternalFailureException": + return newErrorInternalFailureException(u.metadata).(eventstreamapi.Unmarshaler), nil + case "LimitExceededException": + return newErrorLimitExceededException(u.metadata).(eventstreamapi.Unmarshaler), nil + case "ServiceUnavailableException": + return newErrorServiceUnavailableException(u.metadata).(eventstreamapi.Unmarshaler), nil + default: + return &MedicalTranscriptResultStreamUnknownEvent{Type: eventType}, nil + } } -// Status code returns the HTTP status code for the request's response error. -func (s *LimitExceededException) StatusCode() int { - return s.RespMetadata.StatusCode +// MedicalTranscriptResultStreamUnknownEvent provides a failsafe event for the +// MedicalTranscriptResultStream group of events when an unknown event is received. +type MedicalTranscriptResultStreamUnknownEvent struct { + Type string + Message eventstream.Message +} + +// The MedicalTranscriptResultStreamUnknownEvent is and event in the MedicalTranscriptResultStream +// group of events. +func (s *MedicalTranscriptResultStreamUnknownEvent) eventMedicalTranscriptResultStream() {} + +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. 
+func (e *MedicalTranscriptResultStreamUnknownEvent) MarshalEvent(pm protocol.PayloadMarshaler) ( + msg eventstream.Message, err error, +) { + return e.Message.Clone(), nil } -// RequestID returns the service's response RequestID for request. -func (s *LimitExceededException) RequestID() string { - return s.RespMetadata.RequestID +// UnmarshalEvent unmarshals the EventStream Message into the MedicalTranscriptResultStream value. +// This method is only used internally within the SDK's EventStream handling. +func (e *MedicalTranscriptResultStreamUnknownEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + e.Message = msg.Clone() + return nil } // The result of transcribing a portion of the input audio stream. @@ -1050,6 +1864,9 @@ func (s ServiceUnavailableException) GoString() string { return s.String() } +// The ServiceUnavailableException is and event in the MedicalTranscriptResultStream group of events. +func (s *ServiceUnavailableException) eventMedicalTranscriptResultStream() {} + // The ServiceUnavailableException is and event in the TranscriptResultStream group of events. func (s *ServiceUnavailableException) eventTranscriptResultStream() {} @@ -1117,6 +1934,295 @@ func (s *ServiceUnavailableException) RequestID() string { return s.RespMetadata.RequestID } +type StartMedicalStreamTranscriptionInput struct { + _ struct{} `type:"structure" payload:"AudioStream"` + + // When true, instructs Amazon Transcribe Medical to process each audio channel + // separately and then merge the transcription output of each channel into a + // single transcription. + // + // Amazon Transcribe Medical also produces a transcription of each item. An + // item includes the start time, end time, and any alternative transcriptions. + // + // You can't set both ShowSpeakerLabel and EnableChannelIdentification in the + // same request. If you set both, your request returns a BadRequestException. 
+ EnableChannelIdentification *bool `location:"header" locationName:"x-amzn-transcribe-enable-channel-identification" type:"boolean"` + + // Indicates the source language used in the input audio stream. For Amazon + // Transcribe Medical, this is US English (en-US). + // + // LanguageCode is a required field + LanguageCode *string `location:"header" locationName:"x-amzn-transcribe-language-code" type:"string" required:"true" enum:"LanguageCode"` + + // The encoding used for the input audio. + // + // MediaEncoding is a required field + MediaEncoding *string `location:"header" locationName:"x-amzn-transcribe-media-encoding" type:"string" required:"true" enum:"MediaEncoding"` + + // The sample rate of the input audio in Hertz. Sample rates of 16000 Hz or + // higher are accepted. + // + // MediaSampleRateHertz is a required field + MediaSampleRateHertz *int64 `location:"header" locationName:"x-amzn-transcribe-sample-rate" min:"8000" type:"integer" required:"true"` + + // The number of channels that are in your audio stream. + NumberOfChannels *int64 `location:"header" locationName:"x-amzn-transcribe-number-of-channels" min:"2" type:"integer"` + + // Optional. An identifier for the transcription session. If you don't provide + // a session ID, Amazon Transcribe generates one for you and returns it in the + // response. + SessionId *string `location:"header" locationName:"x-amzn-transcribe-session-id" min:"36" type:"string"` + + // When true, enables speaker identification in your real-time stream. + ShowSpeakerLabel *bool `location:"header" locationName:"x-amzn-transcribe-show-speaker-label" type:"boolean"` + + // The medical specialty of the clinician or provider. + // + // Specialty is a required field + Specialty *string `location:"header" locationName:"x-amzn-transcribe-specialty" type:"string" required:"true" enum:"Specialty"` + + // The type of input audio. Choose DICTATION for a provider dictating patient + // notes. 
Choose CONVERSATION for a dialogue between a patient and one or more + // medical professionanls. + // + // Type is a required field + Type *string `location:"header" locationName:"x-amzn-transcribe-type" type:"string" required:"true" enum:"Type"` + + // The name of the medical custom vocabulary to use when processing the real-time + // stream. + VocabularyName *string `location:"header" locationName:"x-amzn-transcribe-vocabulary-name" min:"1" type:"string"` +} + +// String returns the string representation +func (s StartMedicalStreamTranscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartMedicalStreamTranscriptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartMedicalStreamTranscriptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartMedicalStreamTranscriptionInput"} + if s.LanguageCode == nil { + invalidParams.Add(request.NewErrParamRequired("LanguageCode")) + } + if s.MediaEncoding == nil { + invalidParams.Add(request.NewErrParamRequired("MediaEncoding")) + } + if s.MediaSampleRateHertz == nil { + invalidParams.Add(request.NewErrParamRequired("MediaSampleRateHertz")) + } + if s.MediaSampleRateHertz != nil && *s.MediaSampleRateHertz < 8000 { + invalidParams.Add(request.NewErrParamMinValue("MediaSampleRateHertz", 8000)) + } + if s.NumberOfChannels != nil && *s.NumberOfChannels < 2 { + invalidParams.Add(request.NewErrParamMinValue("NumberOfChannels", 2)) + } + if s.SessionId != nil && len(*s.SessionId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("SessionId", 36)) + } + if s.Specialty == nil { + invalidParams.Add(request.NewErrParamRequired("Specialty")) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + if s.VocabularyName != nil && len(*s.VocabularyName) < 1 { + 
invalidParams.Add(request.NewErrParamMinLen("VocabularyName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEnableChannelIdentification sets the EnableChannelIdentification field's value. +func (s *StartMedicalStreamTranscriptionInput) SetEnableChannelIdentification(v bool) *StartMedicalStreamTranscriptionInput { + s.EnableChannelIdentification = &v + return s +} + +// SetLanguageCode sets the LanguageCode field's value. +func (s *StartMedicalStreamTranscriptionInput) SetLanguageCode(v string) *StartMedicalStreamTranscriptionInput { + s.LanguageCode = &v + return s +} + +// SetMediaEncoding sets the MediaEncoding field's value. +func (s *StartMedicalStreamTranscriptionInput) SetMediaEncoding(v string) *StartMedicalStreamTranscriptionInput { + s.MediaEncoding = &v + return s +} + +// SetMediaSampleRateHertz sets the MediaSampleRateHertz field's value. +func (s *StartMedicalStreamTranscriptionInput) SetMediaSampleRateHertz(v int64) *StartMedicalStreamTranscriptionInput { + s.MediaSampleRateHertz = &v + return s +} + +// SetNumberOfChannels sets the NumberOfChannels field's value. +func (s *StartMedicalStreamTranscriptionInput) SetNumberOfChannels(v int64) *StartMedicalStreamTranscriptionInput { + s.NumberOfChannels = &v + return s +} + +// SetSessionId sets the SessionId field's value. +func (s *StartMedicalStreamTranscriptionInput) SetSessionId(v string) *StartMedicalStreamTranscriptionInput { + s.SessionId = &v + return s +} + +// SetShowSpeakerLabel sets the ShowSpeakerLabel field's value. +func (s *StartMedicalStreamTranscriptionInput) SetShowSpeakerLabel(v bool) *StartMedicalStreamTranscriptionInput { + s.ShowSpeakerLabel = &v + return s +} + +// SetSpecialty sets the Specialty field's value. +func (s *StartMedicalStreamTranscriptionInput) SetSpecialty(v string) *StartMedicalStreamTranscriptionInput { + s.Specialty = &v + return s +} + +// SetType sets the Type field's value. 
+func (s *StartMedicalStreamTranscriptionInput) SetType(v string) *StartMedicalStreamTranscriptionInput { + s.Type = &v + return s +} + +// SetVocabularyName sets the VocabularyName field's value. +func (s *StartMedicalStreamTranscriptionInput) SetVocabularyName(v string) *StartMedicalStreamTranscriptionInput { + s.VocabularyName = &v + return s +} + +type StartMedicalStreamTranscriptionOutput struct { + _ struct{} `type:"structure" payload:"TranscriptResultStream"` + + eventStream *StartMedicalStreamTranscriptionEventStream + + // Shows whether channel identification has been enabled in the stream. + EnableChannelIdentification *bool `location:"header" locationName:"x-amzn-transcribe-enable-channel-identification" type:"boolean"` + + // The language code for the response transcript. For Amazon Transcribe Medical, + // this is US English (en-US). + LanguageCode *string `location:"header" locationName:"x-amzn-transcribe-language-code" type:"string" enum:"LanguageCode"` + + // The encoding used for the input audio stream. + MediaEncoding *string `location:"header" locationName:"x-amzn-transcribe-media-encoding" type:"string" enum:"MediaEncoding"` + + // The sample rate of the input audio in Hertz. Valid value: 16000 Hz. + MediaSampleRateHertz *int64 `location:"header" locationName:"x-amzn-transcribe-sample-rate" min:"8000" type:"integer"` + + // The number of channels identified in the stream. + NumberOfChannels *int64 `location:"header" locationName:"x-amzn-transcribe-number-of-channels" min:"2" type:"integer"` + + // An identifier for the streaming transcription. + RequestId *string `location:"header" locationName:"x-amzn-request-id" type:"string"` + + // Optional. An identifier for the transcription session. If you don't provide + // a session ID, Amazon Transcribe generates one for you and returns it in the + // response. 
+ SessionId *string `location:"header" locationName:"x-amzn-transcribe-session-id" min:"36" type:"string"` + + // Shows whether speaker identification was enabled in the stream. + ShowSpeakerLabel *bool `location:"header" locationName:"x-amzn-transcribe-show-speaker-label" type:"boolean"` + + // The specialty in the medical domain. + Specialty *string `location:"header" locationName:"x-amzn-transcribe-specialty" type:"string" enum:"Specialty"` + + // The type of audio that was transcribed. + Type *string `location:"header" locationName:"x-amzn-transcribe-type" type:"string" enum:"Type"` + + // The name of the vocabulary used when processing the stream. + VocabularyName *string `location:"header" locationName:"x-amzn-transcribe-vocabulary-name" min:"1" type:"string"` +} + +// String returns the string representation +func (s StartMedicalStreamTranscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartMedicalStreamTranscriptionOutput) GoString() string { + return s.String() +} + +// SetEnableChannelIdentification sets the EnableChannelIdentification field's value. +func (s *StartMedicalStreamTranscriptionOutput) SetEnableChannelIdentification(v bool) *StartMedicalStreamTranscriptionOutput { + s.EnableChannelIdentification = &v + return s +} + +// SetLanguageCode sets the LanguageCode field's value. +func (s *StartMedicalStreamTranscriptionOutput) SetLanguageCode(v string) *StartMedicalStreamTranscriptionOutput { + s.LanguageCode = &v + return s +} + +// SetMediaEncoding sets the MediaEncoding field's value. +func (s *StartMedicalStreamTranscriptionOutput) SetMediaEncoding(v string) *StartMedicalStreamTranscriptionOutput { + s.MediaEncoding = &v + return s +} + +// SetMediaSampleRateHertz sets the MediaSampleRateHertz field's value. 
+func (s *StartMedicalStreamTranscriptionOutput) SetMediaSampleRateHertz(v int64) *StartMedicalStreamTranscriptionOutput { + s.MediaSampleRateHertz = &v + return s +} + +// SetNumberOfChannels sets the NumberOfChannels field's value. +func (s *StartMedicalStreamTranscriptionOutput) SetNumberOfChannels(v int64) *StartMedicalStreamTranscriptionOutput { + s.NumberOfChannels = &v + return s +} + +// SetRequestId sets the RequestId field's value. +func (s *StartMedicalStreamTranscriptionOutput) SetRequestId(v string) *StartMedicalStreamTranscriptionOutput { + s.RequestId = &v + return s +} + +// SetSessionId sets the SessionId field's value. +func (s *StartMedicalStreamTranscriptionOutput) SetSessionId(v string) *StartMedicalStreamTranscriptionOutput { + s.SessionId = &v + return s +} + +// SetShowSpeakerLabel sets the ShowSpeakerLabel field's value. +func (s *StartMedicalStreamTranscriptionOutput) SetShowSpeakerLabel(v bool) *StartMedicalStreamTranscriptionOutput { + s.ShowSpeakerLabel = &v + return s +} + +// SetSpecialty sets the Specialty field's value. +func (s *StartMedicalStreamTranscriptionOutput) SetSpecialty(v string) *StartMedicalStreamTranscriptionOutput { + s.Specialty = &v + return s +} + +// SetType sets the Type field's value. +func (s *StartMedicalStreamTranscriptionOutput) SetType(v string) *StartMedicalStreamTranscriptionOutput { + s.Type = &v + return s +} + +// SetVocabularyName sets the VocabularyName field's value. +func (s *StartMedicalStreamTranscriptionOutput) SetVocabularyName(v string) *StartMedicalStreamTranscriptionOutput { + s.VocabularyName = &v + return s +} + +// GetStream returns the type to interact with the event stream. 
+func (s *StartMedicalStreamTranscriptionOutput) GetStream() *StartMedicalStreamTranscriptionEventStream { + return s.eventStream +} + type StartStreamTranscriptionInput struct { _ struct{} `type:"structure" payload:"AudioStream"` @@ -1135,7 +2241,7 @@ type StartStreamTranscriptionInput struct { // LanguageCode is a required field LanguageCode *string `location:"header" locationName:"x-amzn-transcribe-language-code" type:"string" required:"true" enum:"LanguageCode"` - // The encoding used for the input audio. pcm is the only valid value. + // The encoding used for the input audio. // // MediaEncoding is a required field MediaEncoding *string `location:"header" locationName:"x-amzn-transcribe-media-encoding" type:"string" required:"true" enum:"MediaEncoding"` @@ -1679,6 +2785,15 @@ const ( // LanguageCodeDeDe is a LanguageCode enum value LanguageCodeDeDe = "de-DE" + + // LanguageCodePtBr is a LanguageCode enum value + LanguageCodePtBr = "pt-BR" + + // LanguageCodeJaJp is a LanguageCode enum value + LanguageCodeJaJp = "ja-JP" + + // LanguageCodeKoKr is a LanguageCode enum value + LanguageCodeKoKr = "ko-KR" ) // LanguageCode_Values returns all elements of the LanguageCode enum @@ -1692,18 +2807,77 @@ func LanguageCode_Values() []string { LanguageCodeEnAu, LanguageCodeItIt, LanguageCodeDeDe, + LanguageCodePtBr, + LanguageCodeJaJp, + LanguageCodeKoKr, } } const ( // MediaEncodingPcm is a MediaEncoding enum value MediaEncodingPcm = "pcm" + + // MediaEncodingOggOpus is a MediaEncoding enum value + MediaEncodingOggOpus = "ogg-opus" + + // MediaEncodingFlac is a MediaEncoding enum value + MediaEncodingFlac = "flac" ) // MediaEncoding_Values returns all elements of the MediaEncoding enum func MediaEncoding_Values() []string { return []string{ MediaEncodingPcm, + MediaEncodingOggOpus, + MediaEncodingFlac, + } +} + +const ( + // SpecialtyPrimarycare is a Specialty enum value + SpecialtyPrimarycare = "PRIMARYCARE" + + // SpecialtyCardiology is a Specialty enum value + 
SpecialtyCardiology = "CARDIOLOGY" + + // SpecialtyNeurology is a Specialty enum value + SpecialtyNeurology = "NEUROLOGY" + + // SpecialtyOncology is a Specialty enum value + SpecialtyOncology = "ONCOLOGY" + + // SpecialtyRadiology is a Specialty enum value + SpecialtyRadiology = "RADIOLOGY" + + // SpecialtyUrology is a Specialty enum value + SpecialtyUrology = "UROLOGY" +) + +// Specialty_Values returns all elements of the Specialty enum +func Specialty_Values() []string { + return []string{ + SpecialtyPrimarycare, + SpecialtyCardiology, + SpecialtyNeurology, + SpecialtyOncology, + SpecialtyRadiology, + SpecialtyUrology, + } +} + +const ( + // TypeConversation is a Type enum value + TypeConversation = "CONVERSATION" + + // TypeDictation is a Type enum value + TypeDictation = "DICTATION" +) + +// Type_Values returns all elements of the Type enum +func Type_Values() []string { + return []string{ + TypeConversation, + TypeDictation, } } diff --git a/service/transcribestreamingservice/errors.go b/service/transcribestreamingservice/errors.go index 97cedf52e64..5ac62936fcd 100644 --- a/service/transcribestreamingservice/errors.go +++ b/service/transcribestreamingservice/errors.go @@ -11,9 +11,10 @@ const ( // ErrCodeBadRequestException for service response error code // "BadRequestException". // - // One or more arguments to the StartStreamTranscription operation was invalid. - // For example, MediaEncoding was not set to pcm or LanguageCode was not set - // to a valid code. Check the parameters and try your request again. + // One or more arguments to the StartStreamTranscription or StartMedicalStreamTranscription + // operation was invalid. For example, MediaEncoding was not set to a valid + // encoding, or LanguageCode was not set to a valid code. Check the parameters + // and try your request again. 
ErrCodeBadRequestException = "BadRequestException" // ErrCodeConflictException for service response error code @@ -26,8 +27,8 @@ const ( // ErrCodeInternalFailureException for service response error code // "InternalFailureException". // - // A problem occurred while processing the audio. Amazon Transcribe terminated - // processing. Try your request again. + // A problem occurred while processing the audio. Amazon Transcribe or Amazon + // Transcribe Medical terminated processing. Try your request again. ErrCodeInternalFailureException = "InternalFailureException" // ErrCodeLimitExceededException for service response error code diff --git a/service/transcribestreamingservice/eventstream_test.go b/service/transcribestreamingservice/eventstream_test.go index daa92d04457..a768efe3144 100644 --- a/service/transcribestreamingservice/eventstream_test.go +++ b/service/transcribestreamingservice/eventstream_test.go @@ -33,6 +33,597 @@ var _ context.Context var _ sync.WaitGroup var _ strings.Reader +func TestStartMedicalStreamTranscription_Read(t *testing.T) { + expectEvents, eventMsgs := mockStartMedicalStreamTranscriptionReadEvents() + sess, cleanupFn, err := eventstreamtest.SetupEventStreamSession(t, + eventstreamtest.ServeEventStream{ + T: t, + Events: eventMsgs, + }, + true, + ) + if err != nil { + t.Fatalf("expect no error, %v", err) + } + defer cleanupFn() + + svc := New(sess) + resp, err := svc.StartMedicalStreamTranscription(nil) + if err != nil { + t.Fatalf("expect no error got, %v", err) + } + defer resp.GetStream().Close() + + var i int + for event := range resp.GetStream().Events() { + if event == nil { + t.Errorf("%d, expect event, got nil", i) + } + if e, a := expectEvents[i], event; !reflect.DeepEqual(e, a) { + t.Errorf("%d, expect %T %v, got %T %v", i, e, e, a, a) + } + i++ + } + + if err := resp.GetStream().Err(); err != nil { + t.Errorf("expect no error, %v", err) + } +} + +func TestStartMedicalStreamTranscription_ReadClose(t *testing.T) { + _, eventMsgs 
:= mockStartMedicalStreamTranscriptionReadEvents() + sess, cleanupFn, err := eventstreamtest.SetupEventStreamSession(t, + eventstreamtest.ServeEventStream{ + T: t, + Events: eventMsgs, + }, + true, + ) + if err != nil { + t.Fatalf("expect no error, %v", err) + } + defer cleanupFn() + + svc := New(sess) + resp, err := svc.StartMedicalStreamTranscription(nil) + if err != nil { + t.Fatalf("expect no error got, %v", err) + } + + // Assert calling Err before close does not close the stream. + resp.GetStream().Err() + select { + case _, ok := <-resp.GetStream().Events(): + if !ok { + t.Fatalf("expect stream not to be closed, but was") + } + default: + } + + resp.GetStream().Close() + <-resp.GetStream().Events() + + if err := resp.GetStream().Err(); err != nil { + t.Errorf("expect no error, %v", err) + } +} + +func TestStartMedicalStreamTranscription_ReadUnknownEvent(t *testing.T) { + expectEvents, eventMsgs := mockStartMedicalStreamTranscriptionReadEvents() + var eventOffset int + + unknownEvent := eventstream.Message{ + Headers: eventstream.Headers{ + eventstreamtest.EventMessageTypeHeader, + { + Name: eventstreamapi.EventTypeHeader, + Value: eventstream.StringValue("UnknownEventName"), + }, + }, + Payload: []byte("some unknown event"), + } + + eventMsgs = append(eventMsgs[:eventOffset], + append([]eventstream.Message{unknownEvent}, eventMsgs[eventOffset:]...)...) + + expectEvents = append(expectEvents[:eventOffset], + append([]MedicalTranscriptResultStreamEvent{ + &MedicalTranscriptResultStreamUnknownEvent{ + Type: "UnknownEventName", + Message: unknownEvent, + }, + }, + expectEvents[eventOffset:]...)...) 
+ + sess, cleanupFn, err := eventstreamtest.SetupEventStreamSession(t, + eventstreamtest.ServeEventStream{ + T: t, + Events: eventMsgs, + }, + true, + ) + if err != nil { + t.Fatalf("expect no error, %v", err) + } + defer cleanupFn() + + svc := New(sess) + resp, err := svc.StartMedicalStreamTranscription(nil) + if err != nil { + t.Fatalf("expect no error got, %v", err) + } + defer resp.GetStream().Close() + + var i int + for event := range resp.GetStream().Events() { + if event == nil { + t.Errorf("%d, expect event, got nil", i) + } + if e, a := expectEvents[i], event; !reflect.DeepEqual(e, a) { + t.Errorf("%d, expect %T %v, got %T %v", i, e, e, a, a) + } + i++ + } + + if err := resp.GetStream().Err(); err != nil { + t.Errorf("expect no error, %v", err) + } +} + +func BenchmarkStartMedicalStreamTranscription_Read(b *testing.B) { + _, eventMsgs := mockStartMedicalStreamTranscriptionReadEvents() + var buf bytes.Buffer + encoder := eventstream.NewEncoder(&buf) + for _, msg := range eventMsgs { + if err := encoder.Encode(msg); err != nil { + b.Fatalf("failed to encode message, %v", err) + } + } + stream := &loopReader{source: bytes.NewReader(buf.Bytes())} + + sess := unit.Session + svc := New(sess, &aws.Config{ + Endpoint: aws.String("https://example.com"), + DisableParamValidation: aws.Bool(true), + }) + svc.Handlers.Send.Swap(corehandlers.SendHandler.Name, + request.NamedHandler{Name: "mockSend", + Fn: func(r *request.Request) { + r.HTTPResponse = &http.Response{ + Status: "200 OK", + StatusCode: 200, + Header: http.Header{}, + Body: ioutil.NopCloser(stream), + } + }, + }, + ) + + resp, err := svc.StartMedicalStreamTranscription(nil) + if err != nil { + b.Fatalf("failed to create request, %v", err) + } + defer resp.GetStream().Close() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + if err = resp.GetStream().Err(); err != nil { + b.Fatalf("expect no error, got %v", err) + } + event := <-resp.GetStream().Events() + if event == nil { + b.Fatalf("expect event, got nil, 
%v, %d", resp.GetStream().Err(), i) + } + } +} + +func mockStartMedicalStreamTranscriptionReadEvents() ( + []MedicalTranscriptResultStreamEvent, + []eventstream.Message, +) { + expectEvents := []MedicalTranscriptResultStreamEvent{ + &MedicalTranscriptEvent{ + Transcript: &MedicalTranscript{ + Results: []*MedicalResult{ + { + Alternatives: []*MedicalAlternative{ + { + Items: []*MedicalItem{ + { + Confidence: aws.Float64(123.45), + Content: aws.String("string value goes here"), + EndTime: aws.Float64(123.45), + Speaker: aws.String("string value goes here"), + StartTime: aws.Float64(123.45), + Type: aws.String("string value goes here"), + }, + { + Confidence: aws.Float64(123.45), + Content: aws.String("string value goes here"), + EndTime: aws.Float64(123.45), + Speaker: aws.String("string value goes here"), + StartTime: aws.Float64(123.45), + Type: aws.String("string value goes here"), + }, + { + Confidence: aws.Float64(123.45), + Content: aws.String("string value goes here"), + EndTime: aws.Float64(123.45), + Speaker: aws.String("string value goes here"), + StartTime: aws.Float64(123.45), + Type: aws.String("string value goes here"), + }, + }, + Transcript: aws.String("string value goes here"), + }, + { + Items: []*MedicalItem{ + { + Confidence: aws.Float64(123.45), + Content: aws.String("string value goes here"), + EndTime: aws.Float64(123.45), + Speaker: aws.String("string value goes here"), + StartTime: aws.Float64(123.45), + Type: aws.String("string value goes here"), + }, + { + Confidence: aws.Float64(123.45), + Content: aws.String("string value goes here"), + EndTime: aws.Float64(123.45), + Speaker: aws.String("string value goes here"), + StartTime: aws.Float64(123.45), + Type: aws.String("string value goes here"), + }, + { + Confidence: aws.Float64(123.45), + Content: aws.String("string value goes here"), + EndTime: aws.Float64(123.45), + Speaker: aws.String("string value goes here"), + StartTime: aws.Float64(123.45), + Type: aws.String("string value goes 
here"), + }, + }, + Transcript: aws.String("string value goes here"), + }, + { + Items: []*MedicalItem{ + { + Confidence: aws.Float64(123.45), + Content: aws.String("string value goes here"), + EndTime: aws.Float64(123.45), + Speaker: aws.String("string value goes here"), + StartTime: aws.Float64(123.45), + Type: aws.String("string value goes here"), + }, + { + Confidence: aws.Float64(123.45), + Content: aws.String("string value goes here"), + EndTime: aws.Float64(123.45), + Speaker: aws.String("string value goes here"), + StartTime: aws.Float64(123.45), + Type: aws.String("string value goes here"), + }, + { + Confidence: aws.Float64(123.45), + Content: aws.String("string value goes here"), + EndTime: aws.Float64(123.45), + Speaker: aws.String("string value goes here"), + StartTime: aws.Float64(123.45), + Type: aws.String("string value goes here"), + }, + }, + Transcript: aws.String("string value goes here"), + }, + }, + ChannelId: aws.String("string value goes here"), + EndTime: aws.Float64(123.45), + IsPartial: aws.Bool(true), + ResultId: aws.String("string value goes here"), + StartTime: aws.Float64(123.45), + }, + { + Alternatives: []*MedicalAlternative{ + { + Items: []*MedicalItem{ + { + Confidence: aws.Float64(123.45), + Content: aws.String("string value goes here"), + EndTime: aws.Float64(123.45), + Speaker: aws.String("string value goes here"), + StartTime: aws.Float64(123.45), + Type: aws.String("string value goes here"), + }, + { + Confidence: aws.Float64(123.45), + Content: aws.String("string value goes here"), + EndTime: aws.Float64(123.45), + Speaker: aws.String("string value goes here"), + StartTime: aws.Float64(123.45), + Type: aws.String("string value goes here"), + }, + { + Confidence: aws.Float64(123.45), + Content: aws.String("string value goes here"), + EndTime: aws.Float64(123.45), + Speaker: aws.String("string value goes here"), + StartTime: aws.Float64(123.45), + Type: aws.String("string value goes here"), + }, + }, + Transcript: 
aws.String("string value goes here"), + }, + { + Items: []*MedicalItem{ + { + Confidence: aws.Float64(123.45), + Content: aws.String("string value goes here"), + EndTime: aws.Float64(123.45), + Speaker: aws.String("string value goes here"), + StartTime: aws.Float64(123.45), + Type: aws.String("string value goes here"), + }, + { + Confidence: aws.Float64(123.45), + Content: aws.String("string value goes here"), + EndTime: aws.Float64(123.45), + Speaker: aws.String("string value goes here"), + StartTime: aws.Float64(123.45), + Type: aws.String("string value goes here"), + }, + { + Confidence: aws.Float64(123.45), + Content: aws.String("string value goes here"), + EndTime: aws.Float64(123.45), + Speaker: aws.String("string value goes here"), + StartTime: aws.Float64(123.45), + Type: aws.String("string value goes here"), + }, + }, + Transcript: aws.String("string value goes here"), + }, + { + Items: []*MedicalItem{ + { + Confidence: aws.Float64(123.45), + Content: aws.String("string value goes here"), + EndTime: aws.Float64(123.45), + Speaker: aws.String("string value goes here"), + StartTime: aws.Float64(123.45), + Type: aws.String("string value goes here"), + }, + { + Confidence: aws.Float64(123.45), + Content: aws.String("string value goes here"), + EndTime: aws.Float64(123.45), + Speaker: aws.String("string value goes here"), + StartTime: aws.Float64(123.45), + Type: aws.String("string value goes here"), + }, + { + Confidence: aws.Float64(123.45), + Content: aws.String("string value goes here"), + EndTime: aws.Float64(123.45), + Speaker: aws.String("string value goes here"), + StartTime: aws.Float64(123.45), + Type: aws.String("string value goes here"), + }, + }, + Transcript: aws.String("string value goes here"), + }, + }, + ChannelId: aws.String("string value goes here"), + EndTime: aws.Float64(123.45), + IsPartial: aws.Bool(true), + ResultId: aws.String("string value goes here"), + StartTime: aws.Float64(123.45), + }, + { + Alternatives: []*MedicalAlternative{ + 
{ + Items: []*MedicalItem{ + { + Confidence: aws.Float64(123.45), + Content: aws.String("string value goes here"), + EndTime: aws.Float64(123.45), + Speaker: aws.String("string value goes here"), + StartTime: aws.Float64(123.45), + Type: aws.String("string value goes here"), + }, + { + Confidence: aws.Float64(123.45), + Content: aws.String("string value goes here"), + EndTime: aws.Float64(123.45), + Speaker: aws.String("string value goes here"), + StartTime: aws.Float64(123.45), + Type: aws.String("string value goes here"), + }, + { + Confidence: aws.Float64(123.45), + Content: aws.String("string value goes here"), + EndTime: aws.Float64(123.45), + Speaker: aws.String("string value goes here"), + StartTime: aws.Float64(123.45), + Type: aws.String("string value goes here"), + }, + }, + Transcript: aws.String("string value goes here"), + }, + { + Items: []*MedicalItem{ + { + Confidence: aws.Float64(123.45), + Content: aws.String("string value goes here"), + EndTime: aws.Float64(123.45), + Speaker: aws.String("string value goes here"), + StartTime: aws.Float64(123.45), + Type: aws.String("string value goes here"), + }, + { + Confidence: aws.Float64(123.45), + Content: aws.String("string value goes here"), + EndTime: aws.Float64(123.45), + Speaker: aws.String("string value goes here"), + StartTime: aws.Float64(123.45), + Type: aws.String("string value goes here"), + }, + { + Confidence: aws.Float64(123.45), + Content: aws.String("string value goes here"), + EndTime: aws.Float64(123.45), + Speaker: aws.String("string value goes here"), + StartTime: aws.Float64(123.45), + Type: aws.String("string value goes here"), + }, + }, + Transcript: aws.String("string value goes here"), + }, + { + Items: []*MedicalItem{ + { + Confidence: aws.Float64(123.45), + Content: aws.String("string value goes here"), + EndTime: aws.Float64(123.45), + Speaker: aws.String("string value goes here"), + StartTime: aws.Float64(123.45), + Type: aws.String("string value goes here"), + }, + { + 
Confidence: aws.Float64(123.45), + Content: aws.String("string value goes here"), + EndTime: aws.Float64(123.45), + Speaker: aws.String("string value goes here"), + StartTime: aws.Float64(123.45), + Type: aws.String("string value goes here"), + }, + { + Confidence: aws.Float64(123.45), + Content: aws.String("string value goes here"), + EndTime: aws.Float64(123.45), + Speaker: aws.String("string value goes here"), + StartTime: aws.Float64(123.45), + Type: aws.String("string value goes here"), + }, + }, + Transcript: aws.String("string value goes here"), + }, + }, + ChannelId: aws.String("string value goes here"), + EndTime: aws.Float64(123.45), + IsPartial: aws.Bool(true), + ResultId: aws.String("string value goes here"), + StartTime: aws.Float64(123.45), + }, + }, + }, + }, + } + + var marshalers request.HandlerList + marshalers.PushBackNamed(restjson.BuildHandler) + payloadMarshaler := protocol.HandlerPayloadMarshal{ + Marshalers: marshalers, + } + _ = payloadMarshaler + + eventMsgs := []eventstream.Message{ + { + Headers: eventstream.Headers{ + eventstreamtest.EventMessageTypeHeader, + { + Name: eventstreamapi.EventTypeHeader, + Value: eventstream.StringValue("TranscriptEvent"), + }, + }, + Payload: eventstreamtest.MarshalEventPayload(payloadMarshaler, expectEvents[0]), + }, + } + + return expectEvents, eventMsgs +} +func TestStartMedicalStreamTranscription_ReadException(t *testing.T) { + expectEvents := []MedicalTranscriptResultStreamEvent{ + &BadRequestException{ + RespMetadata: protocol.ResponseMetadata{ + StatusCode: 200, + }, + Message_: aws.String("string value goes here"), + }, + } + + var marshalers request.HandlerList + marshalers.PushBackNamed(restjson.BuildHandler) + payloadMarshaler := protocol.HandlerPayloadMarshal{ + Marshalers: marshalers, + } + + eventMsgs := []eventstream.Message{ + { + Headers: eventstream.Headers{ + eventstreamtest.EventExceptionTypeHeader, + { + Name: eventstreamapi.ExceptionTypeHeader, + Value: 
eventstream.StringValue("BadRequestException"), + }, + }, + Payload: eventstreamtest.MarshalEventPayload(payloadMarshaler, expectEvents[0]), + }, + } + + sess, cleanupFn, err := eventstreamtest.SetupEventStreamSession(t, + eventstreamtest.ServeEventStream{ + T: t, + Events: eventMsgs, + }, + true, + ) + if err != nil { + t.Fatalf("expect no error, %v", err) + } + defer cleanupFn() + + svc := New(sess) + resp, err := svc.StartMedicalStreamTranscription(nil) + if err != nil { + t.Fatalf("expect no error got, %v", err) + } + + defer resp.GetStream().Close() + + <-resp.GetStream().Events() + + err = resp.GetStream().Err() + if err == nil { + t.Fatalf("expect err, got none") + } + + expectErr := &BadRequestException{ + RespMetadata: protocol.ResponseMetadata{ + StatusCode: 200, + }, + Message_: aws.String("string value goes here"), + } + aerr, ok := err.(awserr.Error) + if !ok { + t.Errorf("expect exception, got %T, %#v", err, err) + } + if e, a := expectErr.Code(), aerr.Code(); e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := expectErr.Message(), aerr.Message(); e != a { + t.Errorf("expect %v, got %v", e, a) + } + + if e, a := expectErr, aerr; !reflect.DeepEqual(e, a) { + t.Errorf("expect error %+#v, got %+#v", e, a) + } +} + +var _ awserr.Error = (*BadRequestException)(nil) +var _ awserr.Error = (*ConflictException)(nil) +var _ awserr.Error = (*InternalFailureException)(nil) +var _ awserr.Error = (*LimitExceededException)(nil) +var _ awserr.Error = (*ServiceUnavailableException)(nil) + func TestStartStreamTranscription_Read(t *testing.T) { expectEvents, eventMsgs := mockStartStreamTranscriptionReadEvents() sess, cleanupFn, err := eventstreamtest.SetupEventStreamSession(t, @@ -636,6 +1227,195 @@ func (c *loopReader) Read(p []byte) (int, error) { return c.source.Read(p) } +func TestStartMedicalStreamTranscription_Write(t *testing.T) { + clientEvents, expectedClientEvents := mockStartMedicalStreamTranscriptionWriteEvents() + + sess, cleanupFn, err := 
eventstreamtest.SetupEventStreamSession(t, + &eventstreamtest.ServeEventStream{ + T: t, + ClientEvents: expectedClientEvents, + BiDirectional: true, + }, + true) + defer cleanupFn() + + svc := New(sess) + resp, err := svc.StartMedicalStreamTranscription(nil) + if err != nil { + t.Fatalf("expect no error, got %v", err) + } + + stream := resp.GetStream() + + for _, event := range clientEvents { + err = stream.Send(context.Background(), event) + if err != nil { + t.Fatalf("expect no error, got %v", err) + } + } + + if err := stream.Close(); err != nil { + t.Errorf("expect no error, got %v", err) + } +} + +func TestStartMedicalStreamTranscription_WriteClose(t *testing.T) { + sess, cleanupFn, err := eventstreamtest.SetupEventStreamSession(t, + eventstreamtest.ServeEventStream{T: t, BiDirectional: true}, + true, + ) + if err != nil { + t.Fatalf("expect no error, %v", err) + } + defer cleanupFn() + + svc := New(sess) + resp, err := svc.StartMedicalStreamTranscription(nil) + if err != nil { + t.Fatalf("expect no error got, %v", err) + } + + // Assert calling Err before close does not close the stream. 
+ resp.GetStream().Err() + + err = resp.GetStream().Send(context.Background(), &AudioEvent{}) + if err != nil { + t.Fatalf("expect no error, got %v", err) + } + + resp.GetStream().Close() + + if err := resp.GetStream().Err(); err != nil { + t.Errorf("expect no error, %v", err) + } +} + +func TestStartMedicalStreamTranscription_WriteError(t *testing.T) { + sess, cleanupFn, err := eventstreamtest.SetupEventStreamSession(t, + eventstreamtest.ServeEventStream{ + T: t, + BiDirectional: true, + ForceCloseAfter: time.Millisecond * 500, + }, + true, + ) + if err != nil { + t.Fatalf("expect no error, %v", err) + } + defer cleanupFn() + + svc := New(sess) + resp, err := svc.StartMedicalStreamTranscription(nil) + if err != nil { + t.Fatalf("expect no error got, %v", err) + } + + defer resp.GetStream().Close() + + for { + err = resp.GetStream().Send(context.Background(), &AudioEvent{}) + if err != nil { + if strings.Contains("unable to send event", err.Error()) { + t.Errorf("expected stream closed error, got %v", err) + } + break + } + } +} + +func TestStartMedicalStreamTranscription_ReadWrite(t *testing.T) { + expectedServiceEvents, serviceEvents := mockStartMedicalStreamTranscriptionReadEvents() + clientEvents, expectedClientEvents := mockStartMedicalStreamTranscriptionWriteEvents() + + sess, cleanupFn, err := eventstreamtest.SetupEventStreamSession(t, + &eventstreamtest.ServeEventStream{ + T: t, + ClientEvents: expectedClientEvents, + Events: serviceEvents, + BiDirectional: true, + }, + true) + defer cleanupFn() + + svc := New(sess) + resp, err := svc.StartMedicalStreamTranscription(nil) + if err != nil { + t.Fatalf("expect no error, got %v", err) + } + + stream := resp.GetStream() + defer stream.Close() + + var wg sync.WaitGroup + + wg.Add(1) + go func() { + defer wg.Done() + var i int + for event := range resp.GetStream().Events() { + if event == nil { + t.Errorf("%d, expect event, got nil", i) + } + if e, a := expectedServiceEvents[i], event; !reflect.DeepEqual(e, a) { + 
t.Errorf("%d, expect %T %v, got %T %v", i, e, e, a, a) + } + i++ + } + }() + + for _, event := range clientEvents { + err = stream.Send(context.Background(), event) + if err != nil { + t.Errorf("expect no error, got %v", err) + } + } + + resp.GetStream().Close() + + wg.Wait() + + if err := resp.GetStream().Err(); err != nil { + t.Errorf("expect no error, %v", err) + } +} + +func mockStartMedicalStreamTranscriptionWriteEvents() ( + []AudioStreamEvent, + []eventstream.Message, +) { + inputEvents := []AudioStreamEvent{ + &AudioEvent{ + AudioChunk: []byte("blob value goes here"), + }, + } + + var marshalers request.HandlerList + marshalers.PushBackNamed(restjson.BuildHandler) + payloadMarshaler := protocol.HandlerPayloadMarshal{ + Marshalers: marshalers, + } + _ = payloadMarshaler + + eventMsgs := []eventstream.Message{ + { + Headers: eventstream.Headers{ + eventstreamtest.EventMessageTypeHeader, + { + Name: ":content-type", + Value: eventstream.StringValue("application/octet-stream"), + }, + { + Name: eventstreamapi.EventTypeHeader, + Value: eventstream.StringValue("AudioEvent"), + }, + }, + Payload: inputEvents[0].(*AudioEvent).AudioChunk, + }, + } + + return inputEvents, eventMsgs +} + func TestStartStreamTranscription_Write(t *testing.T) { clientEvents, expectedClientEvents := mockStartStreamTranscriptionWriteEvents() diff --git a/service/transcribestreamingservice/transcribestreamingserviceiface/interface.go b/service/transcribestreamingservice/transcribestreamingserviceiface/interface.go index 0ca7d0d763f..f6246fce9e7 100644 --- a/service/transcribestreamingservice/transcribestreamingserviceiface/interface.go +++ b/service/transcribestreamingservice/transcribestreamingserviceiface/interface.go @@ -26,7 +26,7 @@ import ( // // myFunc uses an SDK service client to make a request to // // Amazon Transcribe Streaming Service. 
// func myFunc(svc transcribestreamingserviceiface.TranscribeStreamingServiceAPI) bool { -// // Make svc.StartStreamTranscription request +// // Make svc.StartMedicalStreamTranscription request // } // // func main() { @@ -42,7 +42,7 @@ import ( // type mockTranscribeStreamingServiceClient struct { // transcribestreamingserviceiface.TranscribeStreamingServiceAPI // } -// func (m *mockTranscribeStreamingServiceClient) StartStreamTranscription(input *transcribestreamingservice.StartStreamTranscriptionInput) (*transcribestreamingservice.StartStreamTranscriptionOutput, error) { +// func (m *mockTranscribeStreamingServiceClient) StartMedicalStreamTranscription(input *transcribestreamingservice.StartMedicalStreamTranscriptionInput) (*transcribestreamingservice.StartMedicalStreamTranscriptionOutput, error) { // // mock response/functionality // } // @@ -60,6 +60,10 @@ import ( // and waiters. Its suggested to use the pattern above for testing, or using // tooling to generate mocks to satisfy the interfaces. 
type TranscribeStreamingServiceAPI interface { + StartMedicalStreamTranscription(*transcribestreamingservice.StartMedicalStreamTranscriptionInput) (*transcribestreamingservice.StartMedicalStreamTranscriptionOutput, error) + StartMedicalStreamTranscriptionWithContext(aws.Context, *transcribestreamingservice.StartMedicalStreamTranscriptionInput, ...request.Option) (*transcribestreamingservice.StartMedicalStreamTranscriptionOutput, error) + StartMedicalStreamTranscriptionRequest(*transcribestreamingservice.StartMedicalStreamTranscriptionInput) (*request.Request, *transcribestreamingservice.StartMedicalStreamTranscriptionOutput) + StartStreamTranscription(*transcribestreamingservice.StartStreamTranscriptionInput) (*transcribestreamingservice.StartStreamTranscriptionOutput, error) StartStreamTranscriptionWithContext(aws.Context, *transcribestreamingservice.StartStreamTranscriptionInput, ...request.Option) (*transcribestreamingservice.StartStreamTranscriptionOutput, error) StartStreamTranscriptionRequest(*transcribestreamingservice.StartStreamTranscriptionInput) (*request.Request, *transcribestreamingservice.StartStreamTranscriptionOutput)