diff --git a/CHANGELOG.md b/CHANGELOG.md index 0bec9930189..ba5ffe3d217 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,35 @@ +Release v1.35.35 (2020-11-24) +=== + +### Service Client Updates +* `service/appflow`: Updates service API and documentation +* `service/batch`: Updates service API and documentation + * Add Ec2Configuration in ComputeEnvironment.ComputeResources. Use in CreateComputeEnvironment API to enable AmazonLinux2 support. +* `service/cloudformation`: Updates service API and documentation + * Adds support for the new Modules feature for CloudFormation. A module encapsulates one or more resources and their respective configurations for reuse across your organization. +* `service/cloudtrail`: Updates service API and documentation + * CloudTrail now includes advanced event selectors, which give you finer-grained control over the events that are logged to your trail. +* `service/codebuild`: Updates service API and documentation + * Adding GetReportGroupTrend API for Test Reports. +* `service/cognito-idp`: Updates service API and documentation +* `service/comprehend`: Updates service API, documentation, and paginators +* `service/elasticbeanstalk`: Updates service API and documentation + * Updates the Integer constraint of DescribeEnvironmentManagedActionHistory's MaxItems parameter to [1, 100]. +* `service/fsx`: Updates service API and documentation +* `service/gamelift`: Updates service API and documentation + * GameLift FlexMatch is now available as a standalone matchmaking solution. FlexMatch now provides customizable matchmaking for games hosted peer-to-peer, on-premises, or on cloud compute primitives. +* `service/iotsitewise`: Updates service API and documentation +* `service/lex-models`: Updates service API +* `service/mediaconvert`: Updates service API and documentation + * AWS Elemental MediaConvert SDK has added support for Vorbis and Opus audio in OGG/OGA containers. +* `service/mwaa`: Adds new service +* `service/quicksight`: Updates service API and documentation + * Support for embedding without user registration. New enum EmbeddingIdentityType. A potential breaking change. Affects code that refers IdentityType enum type directly instead of literal string value. 
+* `service/states`: Updates service API and documentation + * This release of the AWS Step Functions SDK introduces support for Synchronous Express Workflows +* `service/timestream-write`: Updates service API and documentation +* `service/transcribe-streaming`: Updates service API and documentation + Release v1.35.34 (2020-11-23) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index e300d11724c..21b8010fa84 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -5676,6 +5676,7 @@ var awsPartition = partition{ "snowball": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -7339,6 +7340,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "ram": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "rds": service{ Endpoints: endpoints{ diff --git a/aws/version.go b/aws/version.go index cb5d91e2533..3bfb31c995e 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.35.34" +const SDKVersion = "1.35.35" diff --git a/models/apis/appflow/2020-08-23/api-2.json b/models/apis/appflow/2020-08-23/api-2.json index 652e7ec030e..545b4cf027b 100644 --- a/models/apis/appflow/2020-08-23/api-2.json +++ b/models/apis/appflow/2020-08-23/api-2.json @@ -497,7 +497,8 @@ "Trendmicro":{"shape":"TrendmicroMetadata"}, "Veeva":{"shape":"VeevaMetadata"}, "Zendesk":{"shape":"ZendeskMetadata"}, - "EventBridge":{"shape":"EventBridgeMetadata"} + "EventBridge":{"shape":"EventBridgeMetadata"}, + "Upsolver":{"shape":"UpsolverMetadata"} } }, "ConnectorOAuthRequest":{ @@ -637,7 +638,8 @@ "Infornexus", "Amplitude", "Veeva", - "EventBridge" + "EventBridge", + "Upsolver" ] }, "ConnectorTypeList":{ @@ -896,7 +898,8 @@ "S3":{"shape":"S3DestinationProperties"}, "Salesforce":{"shape":"SalesforceDestinationProperties"}, "Snowflake":{"shape":"SnowflakeDestinationProperties"}, - "EventBridge":{"shape":"EventBridgeDestinationProperties"} + "EventBridge":{"shape":"EventBridgeDestinationProperties"}, + "Upsolver":{"shape":"UpsolverDestinationProperties"} } }, "DestinationField":{ @@ -2279,6 +2282,38 @@ "max":256, "pattern":"\\S+" }, + "UpsolverBucketName":{ + "type":"string", + "max":63, + "min":16, + "pattern":"^(upsolver-appflow)\\S*" + }, + "UpsolverDestinationProperties":{ + "type":"structure", + "required":[ + "bucketName", + "s3OutputFormatConfig" + ], + "members":{ + "bucketName":{"shape":"UpsolverBucketName"}, + "bucketPrefix":{"shape":"BucketPrefix"}, + "s3OutputFormatConfig":{"shape":"UpsolverS3OutputFormatConfig"} + } + }, + "UpsolverMetadata":{ + "type":"structure", + "members":{ + } + }, + "UpsolverS3OutputFormatConfig":{ + "type":"structure", + "required":["prefixConfig"], + "members":{ + "fileType":{"shape":"FileType"}, + "prefixConfig":{"shape":"PrefixConfig"}, + "aggregationConfig":{"shape":"AggregationConfig"} + } + }, "Username":{ "type":"string", "max":512, diff --git a/models/apis/appflow/2020-08-23/docs-2.json b/models/apis/appflow/2020-08-23/docs-2.json index 887e80925b0..8c2a62126fa 100644 --- a/models/apis/appflow/2020-08-23/docs-2.json +++ b/models/apis/appflow/2020-08-23/docs-2.json @@ -56,7 +56,8 @@ "AggregationConfig": { "base": "
The aggregation settings that you can use to customize the output format of your flow data.
", "refs": { - "S3OutputFormatConfig$aggregationConfig": null + "S3OutputFormatConfig$aggregationConfig": null, + "UpsolverS3OutputFormatConfig$aggregationConfig": null } }, "AggregationType": { @@ -170,7 +171,8 @@ "S3DestinationProperties$bucketPrefix": "The object key for the destination bucket in which Amazon AppFlow places the files.
", "S3SourceProperties$bucketPrefix": "The object key for the Amazon S3 bucket in which the source files are stored.
", "SnowflakeConnectorProfileProperties$bucketPrefix": "The bucket path that refers to the Amazon S3 bucket associated with Snowflake.
", - "SnowflakeDestinationProperties$bucketPrefix": "The object key for the destination bucket in which Amazon AppFlow places the files.
" + "SnowflakeDestinationProperties$bucketPrefix": "The object key for the destination bucket in which Amazon AppFlow places the files.
", + "UpsolverDestinationProperties$bucketPrefix": "The object key for the destination Upsolver Amazon S3 bucket in which Amazon AppFlow places the files.
" } }, "ClientCredentialsArn": { @@ -696,7 +698,8 @@ "FileType": { "base": null, "refs": { - "S3OutputFormatConfig$fileType": "Indicates the file type that Amazon AppFlow places in the Amazon S3 bucket.
" + "S3OutputFormatConfig$fileType": "Indicates the file type that Amazon AppFlow places in the Amazon S3 bucket.
", + "UpsolverS3OutputFormatConfig$fileType": "Indicates the file type that Amazon AppFlow places in the Upsolver Amazon S3 bucket.
" } }, "FilterOperatorList": { @@ -1063,7 +1066,8 @@ "PrefixConfig": { "base": "Determines the prefix that Amazon AppFlow applies to the destination folder name. You can name your destination folders according to the flow frequency and date.
", "refs": { - "S3OutputFormatConfig$prefixConfig": "Determines the prefix that Amazon AppFlow applies to the folder name in the Amazon S3 bucket. You can name folders according to the flow frequency and date.
" + "S3OutputFormatConfig$prefixConfig": "Determines the prefix that Amazon AppFlow applies to the folder name in the Amazon S3 bucket. You can name folders according to the flow frequency and date.
", + "UpsolverS3OutputFormatConfig$prefixConfig": null } }, "PrefixFormat": { @@ -1220,7 +1224,7 @@ "ScheduleExpression": { "base": null, "refs": { - "ScheduledTriggerProperties$scheduleExpression": "The scheduling expression that determines when and how often the rule runs.
" + "ScheduledTriggerProperties$scheduleExpression": " The scheduling expression that determines the rate at which the schedule will run, for example rate(5minutes)
.
Specifies the account user name that most recently updated the flow.
" } }, + "UpsolverBucketName": { + "base": null, + "refs": { + "UpsolverDestinationProperties$bucketName": "The Upsolver Amazon S3 bucket name in which Amazon AppFlow places the transferred data.
" + } + }, + "UpsolverDestinationProperties": { + "base": "The properties that are applied when Upsolver is used as a destination.
", + "refs": { + "DestinationConnectorProperties$Upsolver": "The properties required to query Upsolver.
" + } + }, + "UpsolverMetadata": { + "base": "The connector metadata specific to Upsolver.
", + "refs": { + "ConnectorMetadata$Upsolver": "The connector metadata specific to Upsolver.
" + } + }, + "UpsolverS3OutputFormatConfig": { + "base": "The configuration that determines how Amazon AppFlow formats the flow output data when Upsolver is used as the destination.
", + "refs": { + "UpsolverDestinationProperties$s3OutputFormatConfig": "The configuration that determines how data is formatted when Upsolver is used as the flow destination.
" + } + }, "Username": { "base": null, "refs": { diff --git a/models/apis/batch/2016-08-10/api-2.json b/models/apis/batch/2016-08-10/api-2.json index 251d43afcb4..c6508f9282e 100644 --- a/models/apis/batch/2016-08-10/api-2.json +++ b/models/apis/batch/2016-08-10/api-2.json @@ -439,7 +439,11 @@ "maxvCpus":{"shape":"Integer"}, "desiredvCpus":{"shape":"Integer"}, "instanceTypes":{"shape":"StringList"}, - "imageId":{"shape":"String"}, + "imageId":{ + "shape":"String", + "deprecated":true, + "deprecatedMessage":"This field is deprecated, use ec2Configuration[].imageIdOverride instead." + }, "subnets":{"shape":"StringList"}, "securityGroupIds":{"shape":"StringList"}, "ec2KeyPair":{"shape":"String"}, @@ -448,7 +452,8 @@ "placementGroup":{"shape":"String"}, "bidPercentage":{"shape":"Integer"}, "spotIamFleetRole":{"shape":"String"}, - "launchTemplate":{"shape":"LaunchTemplateSpecification"} + "launchTemplate":{"shape":"LaunchTemplateSpecification"}, + "ec2Configuration":{"shape":"Ec2ConfigurationList"} } }, "ComputeResourceUpdate":{ @@ -699,6 +704,18 @@ "type":"list", "member":{"shape":"Device"} }, + "Ec2Configuration":{ + "type":"structure", + "required":["imageType"], + "members":{ + "imageType":{"shape":"ImageType"}, + "imageIdOverride":{"shape":"ImageIdOverride"} + } + }, + "Ec2ConfigurationList":{ + "type":"list", + "member":{"shape":"Ec2Configuration"} + }, "EnvironmentVariables":{ "type":"list", "member":{"shape":"KeyValuePair"} @@ -723,6 +740,16 @@ "sourcePath":{"shape":"String"} } }, + "ImageIdOverride":{ + "type":"string", + "max":256, + "min":1 + }, + "ImageType":{ + "type":"string", + "max":256, + "min":1 + }, "Integer":{"type":"integer"}, "JQState":{ "type":"string", diff --git a/models/apis/batch/2016-08-10/docs-2.json b/models/apis/batch/2016-08-10/docs-2.json index ea8a4b07c24..1a4fa332ca5 100644 --- a/models/apis/batch/2016-08-10/docs-2.json +++ b/models/apis/batch/2016-08-10/docs-2.json @@ -312,6 +312,18 @@ "LinuxParameters$devices": "Any host devices to expose to the container. This parameter maps to Devices
in the Create a container section of the Docker Remote API and the --device
option to docker run.
Provides information used to select Amazon Machine Images (AMIs) for instances in the compute environment. If the Ec2Configuration
is not specified, the default is ECS_AL1
.
Provides additional details used to select the AMI for instances in a compute environment.
" + } + }, "EnvironmentVariables": { "base": null, "refs": { @@ -338,6 +350,18 @@ "Volume$host": "The contents of the host
parameter determine whether your data volume persists on the host container instance and where it is stored. If the host parameter is empty, then the Docker daemon assigns a host path for your data volume. However, the data is not guaranteed to persist after the containers associated with it stop running.
The AMI ID used for instances launched in the compute environment that match the image type. This setting overrides the imageId
set in the computeResource
object.
The image type to match with the instance type to pick an AMI. If the imageIdOverride
parameter is not specified, then a recent Amazon ECS-optimized AMI will be used.
Amazon Linux 2− Default for all AWS Graviton-based instance families (for example, C6g
, M6g
, R6g
, and T4g
) and can be used for all non-GPU instance types.
Amazon Linux 2 (GPU)− Default for all GPU instance families (for example, P4
and G4
) and can be used for all non-AWS Graviton-based instance types.
Amazon Linux− Default for all non-GPU, non-AWS Graviton-based instance families. Amazon Linux is approaching the end of standard support. For more information, see Amazon Linux AMI.
A short, human-readable string to provide additional details about the current status of the compute environment.
", "ComputeEnvironmentDetail$serviceRole": "The service role associated with the compute environment that allows AWS Batch to make calls to AWS API operations on your behalf.
", "ComputeEnvironmentOrder$computeEnvironment": "The Amazon Resource Name (ARN) of the compute environment.
", - "ComputeResource$imageId": "The Amazon Machine Image (AMI) ID used for instances launched in the compute environment.
", + "ComputeResource$imageId": "The Amazon Machine Image (AMI) ID used for instances launched in the compute environment. This parameter is overridden by the imageIdOverride
member of the Ec2Configuration
structure.
The Amazon EC2 key pair that is used for instances launched in the compute environment.
", "ComputeResource$instanceRole": "The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. You can specify the short name or full Amazon Resource Name (ARN) of an instance profile. For example, ecsInstanceRole
or arn:aws:iam::<aws_account_id>:instance-profile/ecsInstanceRole
. For more information, see Amazon ECS Instance Role in the AWS Batch User Guide.
The Amazon EC2 placement group to associate with your compute resources. If you intend to submit multi-node parallel jobs to your compute environment, you should consider creating a cluster placement group and associate it with your compute resources. This keeps your multi-node parallel job on a logical grouping of instances within a single Availability Zone with high network flow potential. For more information, see Placement Groups in the Amazon EC2 User Guide for Linux Instances.
", diff --git a/models/apis/cloudformation/2010-05-15/api-2.json b/models/apis/cloudformation/2010-05-15/api-2.json index 439aa3b7c31..4afe324c00b 100644 --- a/models/apis/cloudformation/2010-05-15/api-2.json +++ b/models/apis/cloudformation/2010-05-15/api-2.json @@ -1987,6 +1987,7 @@ "Visibility":{"shape":"Visibility"}, "ProvisioningType":{"shape":"ProvisioningType"}, "DeprecatedStatus":{"shape":"DeprecatedStatus"}, + "Type":{"shape":"RegistryType"}, "MaxResults":{"shape":"MaxResults"}, "NextToken":{"shape":"NextToken"} } @@ -2015,6 +2016,7 @@ "LogGroupName":{"shape":"LogGroupName"} } }, + "LogicalIdHierarchy":{"type":"string"}, "LogicalResourceId":{"type":"string"}, "LogicalResourceIds":{ "type":"list", @@ -2037,6 +2039,13 @@ "min":1 }, "Metadata":{"type":"string"}, + "ModuleInfo":{ + "type":"structure", + "members":{ + "TypeHierarchy":{"shape":"TypeHierarchy"}, + "LogicalIdHierarchy":{"shape":"LogicalIdHierarchy"} + } + }, "MonitoringTimeInMinutes":{ "type":"integer", "max":180, @@ -2321,7 +2330,10 @@ }, "RegistryType":{ "type":"string", - "enum":["RESOURCE"] + "enum":[ + "RESOURCE", + "MODULE" + ] }, "Replacement":{ "type":"string", @@ -2366,7 +2378,8 @@ "Replacement":{"shape":"Replacement"}, "Scope":{"shape":"Scope"}, "Details":{"shape":"ResourceChangeDetails"}, - "ChangeSetId":{"shape":"ChangeSetId"} + "ChangeSetId":{"shape":"ChangeSetId"}, + "ModuleInfo":{"shape":"ModuleInfo"} } }, "ResourceChangeDetail":{ @@ -2820,7 +2833,8 @@ "ResourceStatus":{"shape":"ResourceStatus"}, "ResourceStatusReason":{"shape":"ResourceStatusReason"}, "Description":{"shape":"Description"}, - "DriftInformation":{"shape":"StackResourceDriftInformation"} + "DriftInformation":{"shape":"StackResourceDriftInformation"}, + "ModuleInfo":{"shape":"ModuleInfo"} } }, "StackResourceDetail":{ @@ -2842,7 +2856,8 @@ "ResourceStatusReason":{"shape":"ResourceStatusReason"}, "Description":{"shape":"Description"}, "Metadata":{"shape":"Metadata"}, - "DriftInformation":{"shape":"StackResourceDriftInformation"} + "DriftInformation":{"shape":"StackResourceDriftInformation"}, + "ModuleInfo":{"shape":"ModuleInfo"} } }, "StackResourceDrift":{ @@ -2864,7 +2879,8 @@ "ActualProperties":{"shape":"Properties"}, "PropertyDifferences":{"shape":"PropertyDifferences"}, "StackResourceDriftStatus":{"shape":"StackResourceDriftStatus"}, - "Timestamp":{"shape":"Timestamp"} + "Timestamp":{"shape":"Timestamp"}, + "ModuleInfo":{"shape":"ModuleInfo"} } }, "StackResourceDriftInformation":{ @@ -2921,7 +2937,8 @@ "LastUpdatedTimestamp":{"shape":"Timestamp"}, "ResourceStatus":{"shape":"ResourceStatus"}, "ResourceStatusReason":{"shape":"ResourceStatusReason"}, - "DriftInformation":{"shape":"StackResourceDriftInformationSummary"} + "DriftInformation":{"shape":"StackResourceDriftInformationSummary"}, + "ModuleInfo":{"shape":"ModuleInfo"} } }, "StackResources":{ @@ -3301,11 +3318,12 @@ "max":1024, "pattern":"arn:aws[A-Za-z0-9-]{0,64}:cloudformation:[A-Za-z0-9-]{1,64}:([0-9]{12})?:type/.+" }, + "TypeHierarchy":{"type":"string"}, "TypeName":{ "type":"string", - "max":196, + "max":204, "min":10, - "pattern":"[A-Za-z0-9]{2,64}::[A-Za-z0-9]{2,64}::[A-Za-z0-9]{2,64}" + "pattern":"[A-Za-z0-9]{2,64}::[A-Za-z0-9]{2,64}::[A-Za-z0-9]{2,64}(::MODULE){0,1}" }, "TypeNotFoundException":{ "type":"structure", diff --git a/models/apis/cloudformation/2010-05-15/docs-2.json b/models/apis/cloudformation/2010-05-15/docs-2.json index 90d79becc53..cd81739832b 100644 --- a/models/apis/cloudformation/2010-05-15/docs-2.json +++ b/models/apis/cloudformation/2010-05-15/docs-2.json @@ 
-445,7 +445,7 @@ } }, "DeploymentTargets": { - "base": "[Service-managed
permissions] The AWS Organizations accounts to which StackSets deploys. StackSets does not deploy stack instances to the organization master account, even if the master account is in your organization or in an OU in your organization.
For update operations, you can specify either Accounts
or OrganizationalUnitIds
. For create and delete operations, specify OrganizationalUnitIds
.
[Service-managed
permissions] The AWS Organizations accounts to which StackSets deploys. StackSets does not deploy stack instances to the organization management account, even if the organization management account is in your organization or in an OU in your organization.
For update operations, you can specify either Accounts
or OrganizationalUnitIds
. For create and delete operations, specify OrganizationalUnitIds
.
[Service-managed
permissions] The AWS Organizations accounts for which to create stack instances in the specified Regions.
You can specify Accounts
or DeploymentTargets
, but not both.
[Service-managed
permissions] The AWS Organizations accounts from which to delete stack instances.
You can specify Accounts
or DeploymentTargets
, but not both.
Specifies logging configuration information for a type.
" } }, + "LogicalIdHierarchy": { + "base": null, + "refs": { + "ModuleInfo$LogicalIdHierarchy": "A concantenated list of the logical IDs of the module or modules containing the resource. Modules are listed starting with the inner-most nested module, and separated by /
.
In the following example, the resource was created from a module, moduleA
, that is nested inside a parent module, moduleB
.
moduleA/moduleB
For more information, see Referencing resources in a module in the CloudFormation User Guide.
" + } + }, "LogicalResourceId": { "base": null, "refs": { @@ -1089,6 +1095,16 @@ "StackResourceDetail$Metadata": "The content of the Metadata
attribute declared for the resource. For more information, see Metadata Attribute in the AWS CloudFormation User Guide.
Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template.
For more information on modules, see Using modules to encapsulate and reuse resource configurations in the CloudFormation User Guide.
", + "refs": { + "ResourceChange$ModuleInfo": "Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template.
", + "StackResource$ModuleInfo": "Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template.
", + "StackResourceDetail$ModuleInfo": "Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template.
", + "StackResourceDrift$ModuleInfo": "Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template.
", + "StackResourceSummary$ModuleInfo": "Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template.
" + } + }, "MonitoringTimeInMinutes": { "base": null, "refs": { @@ -1470,6 +1486,7 @@ "DescribeTypeOutput$Type": "The kind of type.
Currently the only valid value is RESOURCE
.
The kind of type.
Currently the only valid value is RESOURCE
.
Conditional: You must specify either TypeName
and Type
, or Arn
.
The kind of the type.
Currently the only valid value is RESOURCE
.
Conditional: You must specify either TypeName
and Type
, or Arn
.
The type of extension.
", "RegisterTypeInput$Type": "The kind of type.
Currently, the only valid value is RESOURCE
.
The kind of type.
Conditional: You must specify either TypeName
and Type
, or Arn
.
The kind of type.
", @@ -2433,6 +2450,12 @@ "TypeVersionSummary$Arn": "The Amazon Resource Name (ARN) of the type version.
" } }, + "TypeHierarchy": { + "base": null, + "refs": { + "ModuleInfo$TypeHierarchy": "A concantenated list of the the module type or types containing the resource. Module types are listed starting with the inner-most nested module, and separated by /
.
In the following example, the resource was created from a module of type AWS::First::Example::MODULE
, that is nested inside a parent module of type AWS::Second::Example::MODULE
.
AWS::First::Example::MODULE/AWS::Second::Example::MODULE
Returns all public keys whose private keys were used to sign the digest files within the specified time range. The public key is needed to validate digest files that were signed with its corresponding private key.
CloudTrail uses different private/public key pairs per region. Each digest file is signed with a private key unique to its region. Therefore, when you validate a digest file from a particular region, you must look in the same region for its corresponding public key.
Lists the tags for the trail in the current region.
", "ListTrails": "Lists trails that are in the current account.
", - "LookupEvents": "Looks up management events or CloudTrail Insights events that are captured by CloudTrail. You can look up events that occurred in a region within the last 90 days. Lookup supports the following attributes for management events:
AWS access key
Event ID
Event name
Event source
Read only
Resource name
Resource type
User name
Lookup supports the following attributes for Insights events:
Event ID
Event name
Event source
All attributes are optional. The default number of results returned is 50, with a maximum of 50 possible. The response includes a token that you can use to get the next page of results.
The rate of lookup requests is limited to two per second per account. If this limit is exceeded, a throttling error occurs.
Looks up management events or CloudTrail Insights events that are captured by CloudTrail. You can look up events that occurred in a region within the last 90 days. Lookup supports the following attributes for management events:
AWS access key
Event ID
Event name
Event source
Read only
Resource name
Resource type
User name
Lookup supports the following attributes for Insights events:
Event ID
Event name
Event source
All attributes are optional. The default number of results returned is 50, with a maximum of 50 possible. The response includes a token that you can use to get the next page of results.
The rate of lookup requests is limited to two per second, per account, per region. If this limit is exceeded, a throttling error occurs.
Configures an event selector for your trail. Use event selectors to further specify the management and data event settings for your trail. By default, trails created without specific event selectors will be configured to log all read and write management events, and no data events.
When an event occurs in your account, CloudTrail evaluates the event selectors in all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event.
Example
You create an event selector for a trail and specify that you want write-only events.
The EC2 GetConsoleOutput
and RunInstances
API operations occur in your account.
CloudTrail evaluates whether the events match your event selectors.
The RunInstances
is a write-only event and it matches your event selector. The trail logs the event.
The GetConsoleOutput
is a read-only event but it doesn't match your event selector. The trail doesn't log the event.
The PutEventSelectors
operation must be called from the region in which the trail was created; otherwise, an InvalidHomeRegionException
is thrown.
You can configure up to five event selectors for each trail. For more information, see Logging Data and Management Events for Trails and Limits in AWS CloudTrail in the AWS CloudTrail User Guide.
", "PutInsightSelectors": "Lets you enable Insights event logging by specifying the Insights selectors that you want to enable on an existing trail. You also use PutInsightSelectors
to turn off Insights event logging, by passing an empty list of insight types. In this release, only ApiCallRateInsight
is supported as an Insights selector.
Removes the specified tags from a trail.
", @@ -32,6 +32,32 @@ "refs": { } }, + "AdvancedEventSelector": { + "base": null, + "refs": { + "AdvancedEventSelectors$member": null + } + }, + "AdvancedEventSelectors": { + "base": null, + "refs": { + "GetEventSelectorsResponse$AdvancedEventSelectors": null, + "PutEventSelectorsRequest$AdvancedEventSelectors": null, + "PutEventSelectorsResponse$AdvancedEventSelectors": null + } + }, + "AdvancedFieldSelector": { + "base": null, + "refs": { + "AdvancedFieldSelectors$member": null + } + }, + "AdvancedFieldSelectors": { + "base": null, + "refs": { + "AdvancedEventSelector$FieldSelectors": null + } + }, "Boolean": { "base": null, "refs": { @@ -44,7 +70,7 @@ "CreateTrailResponse$LogFileValidationEnabled": "Specifies whether log file integrity validation is enabled.
", "CreateTrailResponse$IsOrganizationTrail": "Specifies whether the trail is an organization trail.
", "DescribeTrailsRequest$includeShadowTrails": "Specifies whether to include shadow trails in the response. A shadow trail is the replication in a region of a trail that was created in a different region, or in the case of an organization trail, the replication of an organization trail in member accounts. If you do not include shadow trails, organization trails in a member account and region replication trails will not be returned. The default is true.
", - "EventSelector$IncludeManagementEvents": "Specify if you want your event selector to include management events for your trail.
For more information, see Management Events in the AWS CloudTrail User Guide.
By default, the value is true
.
Specify if you want your event selector to include management events for your trail.
For more information, see Management Events in the AWS CloudTrail User Guide.
By default, the value is true
.
The first copy of management events is free. You are charged for additional copies of management events that you are logging on any subsequent trail in the same region. For more information about CloudTrail pricing, see AWS CloudTrail Pricing.
", "GetTrailStatusResponse$IsLogging": "Whether the CloudTrail is currently logging AWS API calls.
", "Trail$IncludeGlobalServiceEvents": "Set to True to include AWS API calls from AWS global services such as IAM. Otherwise, False.
", "Trail$IsMultiRegionTrail": "Specifies whether the trail exists only in one region or exists in all regions.
", @@ -373,7 +399,7 @@ } }, "KmsKeyNotFoundException": { - "base": "This exception is thrown when the KMS key does not exist, or when the S3 bucket and the KMS key are not in the same region.
", + "base": "This exception is thrown when the KMS key does not exist, when the S3 bucket and the KMS key are not in the same region, or when the KMS key associated with the SNS topic either does not exist or is not in the same region.
", "refs": { } }, @@ -463,6 +489,23 @@ "refs": { } }, + "Operator": { + "base": null, + "refs": { + "AdvancedFieldSelector$Equals": null, + "AdvancedFieldSelector$StartsWith": null, + "AdvancedFieldSelector$EndsWith": null, + "AdvancedFieldSelector$NotEquals": null, + "AdvancedFieldSelector$NotStartsWith": null, + "AdvancedFieldSelector$NotEndsWith": null + } + }, + "OperatorValue": { + "base": null, + "refs": { + "Operator$member": null + } + }, "OrganizationNotInAllFeaturesModeException": { "base": "This exception is thrown when AWS Organizations is not configured to support all features. All features must be enabled in AWS Organization to support creating an organization trail. For more information, see Prepare For Creating a Trail For Your Organization.
", "refs": { @@ -566,6 +609,18 @@ "refs": { } }, + "SelectorField": { + "base": null, + "refs": { + "AdvancedFieldSelector$Field": null + } + }, + "SelectorName": { + "base": null, + "refs": { + "AdvancedEventSelector$Name": null + } + }, "StartLoggingRequest": { "base": "The request to CloudTrail to start logging AWS API calls for an account.
", "refs": { diff --git a/models/apis/codebuild/2016-10-06/api-2.json b/models/apis/codebuild/2016-10-06/api-2.json index de68230f3e1..1613c89c6a0 100644 --- a/models/apis/codebuild/2016-10-06/api-2.json +++ b/models/apis/codebuild/2016-10-06/api-2.json @@ -239,6 +239,19 @@ {"shape":"ResourceNotFoundException"} ] }, + "GetReportGroupTrend":{ + "name":"GetReportGroupTrend", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetReportGroupTrendInput"}, + "output":{"shape":"GetReportGroupTrendOutput"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ] + }, "GetResourcePolicy":{ "name":"GetResourcePolicy", "http":{ @@ -1280,6 +1293,25 @@ "type":"list", "member":{"shape":"FilterGroup"} }, + "GetReportGroupTrendInput":{ + "type":"structure", + "required":[ + "reportGroupArn", + "trendField" + ], + "members":{ + "reportGroupArn":{"shape":"NonEmptyString"}, + "numOfReports":{"shape":"PageSize"}, + "trendField":{"shape":"ReportGroupTrendFieldType"} + } + }, + "GetReportGroupTrendOutput":{ + "type":"structure", + "members":{ + "stats":{"shape":"ReportGroupTrendStats"}, + "rawData":{"shape":"ReportGroupTrendRawDataList"} + } + }, "GetResourcePolicyInput":{ "type":"structure", "required":["resourceArn"], @@ -1941,6 +1973,32 @@ "DELETING" ] }, + "ReportGroupTrendFieldType":{ + "type":"string", + "enum":[ + "PASS_RATE", + "DURATION", + "TOTAL", + "LINE_COVERAGE", + "LINES_COVERED", + "LINES_MISSED", + "BRANCH_COVERAGE", + "BRANCHES_COVERED", + "BRANCHES_MISSED" + ] + }, + "ReportGroupTrendRawDataList":{ + "type":"list", + "member":{"shape":"ReportWithRawData"} + }, + "ReportGroupTrendStats":{ + "type":"structure", + "members":{ + "average":{"shape":"String"}, + "max":{"shape":"String"}, + "min":{"shape":"String"} + } + }, "ReportGroups":{ "type":"list", "member":{"shape":"ReportGroup"}, @@ -1976,6 +2034,13 @@ "CODE_COVERAGE" ] }, + "ReportWithRawData":{ + "type":"structure", + "members":{ + "reportArn":{"shape":"NonEmptyString"}, + "data":{"shape":"String"} + } + }, "Reports":{ "type":"list", "member":{"shape":"Report"}, diff --git a/models/apis/codebuild/2016-10-06/docs-2.json b/models/apis/codebuild/2016-10-06/docs-2.json index 180d1300f43..c84e05e2d8e 100644 --- a/models/apis/codebuild/2016-10-06/docs-2.json +++ b/models/apis/codebuild/2016-10-06/docs-2.json @@ -20,6 +20,7 @@ "DeleteWebhook": "For an existing AWS CodeBuild build project that has its source code stored in a GitHub or Bitbucket repository, stops AWS CodeBuild from rebuilding the source code every time a code change is pushed to the repository.
", "DescribeCodeCoverages": "Retrieves one or more code coverage reports.
", "DescribeTestCases": "Returns a list of details about test cases for a report.
", + "GetReportGroupTrend": null, "GetResourcePolicy": "Gets a resource policy that is identified by its resource ARN.
", "ImportSourceCredentials": "Imports the source repository credentials for an AWS CodeBuild project that has its source code stored in a GitHub, GitHub Enterprise, or Bitbucket repository.
", "InvalidateProjectCache": "Resets the cache for a project.
", @@ -594,6 +595,16 @@ "Webhook$filterGroups": "An array of arrays of WebhookFilter
objects used to determine which webhooks are triggered. At least one WebhookFilter
in the array must specify EVENT
as its type
.
For a build to be triggered, at least one filter group in the filterGroups
array must pass. For a filter group to pass, each of its filters must pass.
The ARN of the report for which test cases are returned.
", "EnvironmentVariable$name": "The name or key of the environment variable.
", "ExportedEnvironmentVariable$name": "The name of this exported environment variable.
", + "GetReportGroupTrendInput$reportGroupArn": null, "GetResourcePolicyInput$resourceArn": "The ARN of the resource that is associated with the resource policy.
", "GetResourcePolicyOutput$policy": "The resource policy for the resource identified by the input ARN parameter.
", "Identifiers$member": null, @@ -891,6 +903,7 @@ "ReportArns$member": null, "ReportGroup$arn": " The ARN of a ReportGroup
.
Specifies the identifier of the batch build to restart.
", "RetryBuildInput$id": "Specifies the identifier of the build to restart.
", "S3ReportExportConfig$bucket": "The name of the S3 bucket where the raw data of a report are exported.
", @@ -942,6 +955,7 @@ "refs": { "DescribeCodeCoveragesInput$maxResults": "The maximum number of results to return.
", "DescribeTestCasesInput$maxResults": " The maximum number of paginated test cases returned per response. Use nextToken
to iterate pages in the list of returned TestCase
objects. The default value is 100.
The maximum number of results to return.
", "ListBuildBatchesInput$maxResults": "The maximum number of results to return.
", "ListReportGroupsInput$maxResults": " The maximum number of paginated report groups returned per response. Use nextToken
to iterate pages in the list of returned ReportGroup
objects. The default value is 100.
The type of the ReportGroup
. The one valid value is TEST
.
The source version for the corresponding source identifier. If specified, must be one of:
For AWS CodeCommit: the commit ID, branch, or Git tag to use.
For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID
(for example, pr/25
). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
For Amazon Simple Storage Service (Amazon S3): the version ID of the object that represents the build input ZIP file to use.
For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.
", "Report$name": "The name of the report that was run.
", "Report$executionId": "The ARN of the build run that generated this report.
", + "ReportGroupTrendStats$average": null, + "ReportGroupTrendStats$max": null, + "ReportGroupTrendStats$min": null, "ReportStatusCounts$key": null, + "ReportWithRawData$data": null, "ResolvedArtifact$location": "The location of the artifact.
", "ResolvedArtifact$identifier": "The identifier of the artifact.
", "RetryBuildBatchInput$idempotencyToken": "A unique, case sensitive identifier you provide to ensure the idempotency of the RetryBuildBatch
request. The token is included in the RetryBuildBatch
request and is valid for five minutes. If you repeat the RetryBuildBatch
request with the same token, but change a parameter, AWS CodeBuild returns a parameter mismatch error.
Responds to the authentication challenge.
", "SetRiskConfiguration": "Configures actions on detected risks. To delete the risk configuration for UserPoolId
or ClientId
, pass null values for all four configuration types.
To enable Amazon Cognito advanced security features, update the user pool to include the UserPoolAddOns
key AdvancedSecurityMode
.
Sets the UI customization information for a user pool's built-in app UI.
You can specify app UI customization settings for a single client (with a specific clientId
) or for all clients (by setting the clientId
to ALL
). If you specify ALL
, the default configuration will be used for every client that has no UI customization set previously. If you specify UI customization settings for a particular client, it will no longer fall back to the ALL
configuration.
To use this API, your user pool must have a domain associated with it. Otherwise, there is no place to host the app's pages, and the service will throw an error.
Set the user's multi-factor authentication (MFA) method preference, including which MFA factors are enabled and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are enabled. If multiple options are enabled and no preference is set, a challenge to choose an MFA option will be returned during sign in.
", + "SetUserMFAPreference": "Set the user's multi-factor authentication (MFA) method preference, including which MFA factors are enabled and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are enabled. If multiple options are enabled and no preference is set, a challenge to choose an MFA option will be returned during sign in. If an MFA type is enabled for a user, the user will be prompted for MFA during all sign in attempts, unless device tracking is turned on and the device has been trusted. If you would like MFA to be applied selectively based on the assessed risk level of sign in attempts, disable MFA for users and turn on Adaptive Authentication for the user pool.
", "SetUserPoolMfaConfig": "Set the user pool multi-factor authentication (MFA) configuration.
", "SetUserSettings": "This action is no longer supported. You can use it to configure only SMS MFA. You can't use it to configure TOTP software token MFA. To configure either type of MFA, use SetUserMFAPreference instead.
", "SignUp": "Registers the user in the specified user pool and creates a user name, password, and user attributes.
", @@ -477,6 +477,8 @@ "CreateGroupRequest$RoleArn": "The role ARN for the group.
", "CreateUserImportJobRequest$CloudWatchLogsRoleArn": "The role ARN for the Amazon CloudWatch Logging role for the user import job.
", "CustomDomainConfigType$CertificateArn": "The Amazon Resource Name (ARN) of an AWS Certificate Manager SSL certificate. You use this certificate for the subdomain of your custom domain.
", + "CustomEmailLambdaVersionConfigType$LambdaArn": "The Lambda Amazon Resource Name of the Lambda function that Amazon Cognito triggers to send email notifications to users.
", + "CustomSMSLambdaVersionConfigType$LambdaArn": "The Lambda Amazon Resource Name of the Lambda function that Amazon Cognito triggers to send SMS notifications to users.
", "EmailConfigurationType$SourceArn": "The Amazon Resource Name (ARN) of a verified email address in Amazon SES. This email address is used in one of the following ways, depending on the value that you specify for the EmailSendingAccount
parameter:
If you specify COGNITO_DEFAULT
, Amazon Cognito uses this address as the custom FROM address when it emails your users by using its built-in email account.
If you specify DEVELOPER
, Amazon Cognito emails your users with this address by calling Amazon SES on your behalf.
The role ARN for the group.
", "LambdaConfigType$PreSignUp": "A pre-registration AWS Lambda trigger.
", @@ -489,9 +491,10 @@ "LambdaConfigType$VerifyAuthChallengeResponse": "Verifies the authentication challenge response.
", "LambdaConfigType$PreTokenGeneration": "A Lambda trigger that is invoked before token generation.
", "LambdaConfigType$UserMigration": "The user migration Lambda config type.
", + "LambdaConfigType$KMSKeyID": "The Amazon Resource Name of Key Management Service Customer master keys . Amazon Cognito uses the key to encrypt codes and temporary passwords sent to CustomEmailSender
and CustomSMSSender
.
The Amazon Resource Name (ARN) of the user pool that the tags are assigned to.
", "NotifyConfigurationType$SourceArn": "The Amazon Resource Name (ARN) of the identity that is associated with the sending authorization policy. It permits Amazon Cognito to send for the email address specified in the From
parameter.
The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) caller. This is the ARN of the IAM role in your AWS account which Cognito will use to send SMS messages.
", + "SmsConfigurationType$SnsCallerArn": "The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) caller. This is the ARN of the IAM role in your AWS account which Cognito will use to send SMS messages. SMS messages are subject to a spending limit.
", "TagResourceRequest$ResourceArn": "The Amazon Resource Name (ARN) of the user pool to assign the tags to.
", "UntagResourceRequest$ResourceArn": "The Amazon Resource Name (ARN) of the user pool that the tags are assigned to.
", "UpdateGroupRequest$RoleArn": "The new role ARN for the group. This is used for setting the cognito:roles
and cognito:preferred_role
claims in the token.
In the password policy that you have set, refers to whether you have required users to use at least one lowercase letter in their password.
", "PasswordPolicyType$RequireNumbers": "In the password policy that you have set, refers to whether you have required users to use at least one number in their password.
", "PasswordPolicyType$RequireSymbols": "In the password policy that you have set, refers to whether you have required users to use at least one symbol in their password.
", - "SMSMfaSettingsType$Enabled": "Specifies whether SMS text message MFA is enabled.
", + "SMSMfaSettingsType$Enabled": "Specifies whether SMS text message MFA is enabled. If an MFA type is enabled for a user, the user will be prompted for MFA during all sign in attempts, unless device tracking is turned on and the device has been trusted.
", "SMSMfaSettingsType$PreferredMfa": "Specifies whether SMS is the preferred MFA method.
", "SchemaAttributeType$DeveloperOnlyAttribute": "We recommend that you use WriteAttributes in the user pool client to control how attributes can be mutated for new use cases instead of using DeveloperOnlyAttribute
.
Specifies whether the attribute type is developer only. This attribute can only be modified by an administrator. Users will not be able to modify this attribute using their access token. For example, DeveloperOnlyAttribute
can be modified using AdminUpdateUserAttributes but cannot be updated using UpdateUserAttributes.
Specifies whether the value of the attribute can be changed.
For any user pool attribute that's mapped to an identity provider attribute, you must set this parameter to true
. Amazon Cognito updates mapped attributes when users sign in to your application through an identity provider. If an attribute is immutable, Amazon Cognito throws an error when it attempts to update the attribute. For more information, see Specifying Identity Provider Attribute Mappings for Your User Pool.
Specifies whether a user pool attribute is required. If the attribute is required and the user does not provide a value, registration or sign-in will fail.
", "SignUpResponse$UserConfirmed": "A response from the server indicating that a user registration has been confirmed.
", "SoftwareTokenMfaConfigType$Enabled": "Specifies whether software token MFA is enabled.
", - "SoftwareTokenMfaSettingsType$Enabled": "Specifies whether software token MFA is enabled.
", + "SoftwareTokenMfaSettingsType$Enabled": "Specifies whether software token MFA is enabled. If an MFA type is enabled for a user, the user will be prompted for MFA during all sign in attempts, unless device tracking is turned on and the device has been trusted.
", "SoftwareTokenMfaSettingsType$PreferredMfa": "Specifies whether software token MFA is the preferred MFA method.
", "UpdateUserPoolClientRequest$AllowedOAuthFlowsUserPoolClient": "Set to true if the client is allowed to follow the OAuth protocol when interacting with Cognito user pools.
", "UserPoolClientType$AllowedOAuthFlowsUserPoolClient": "Set to true if the client is allowed to follow the OAuth protocol when interacting with Cognito user pools.
", @@ -995,6 +998,30 @@ "UpdateUserPoolDomainRequest$CustomDomainConfig": "The configuration for a custom domain that hosts the sign-up and sign-in pages for your application. Use this object to specify an SSL certificate that is managed by ACM.
" } }, + "CustomEmailLambdaVersionConfigType": { + "base": "A custom email sender Lambda configuration type.
", + "refs": { + "LambdaConfigType$CustomEmailSender": "A custom email sender AWS Lambda trigger.
" + } + }, + "CustomEmailSenderLambdaVersionType": { + "base": null, + "refs": { + "CustomEmailLambdaVersionConfigType$LambdaVersion": "The Lambda version represents the signature of the \"request\" attribute in the \"event\" information Amazon Cognito passes to your custom email Lambda function. The only supported value is V1_0
.
A custom SMS sender Lambda configuration type.
", + "refs": { + "LambdaConfigType$CustomSMSSender": "A custom SMS sender AWS Lambda trigger.
" + } + }, + "CustomSMSSenderLambdaVersionType": { + "base": null, + "refs": { + "CustomSMSLambdaVersionConfigType$LambdaVersion": "The Lambda version represents the signature of the \"request\" attribute in the \"event\" information Amazon Cognito passes to your custom SMS Lambda function. The only supported value is V1_0
.
The email configuration type.
", + "base": "The email configuration type.
Amazon Cognito has specific regions for use with Amazon SES. For more information on the supported regions, see Email Settings for Amazon Cognito User Pools.
The email configuration.
", "UpdateUserPoolRequest$EmailConfiguration": "Email configuration.
", @@ -1298,39 +1325,39 @@ "EmailSendingAccountType": { "base": null, "refs": { - "EmailConfigurationType$EmailSendingAccount": "Specifies whether Amazon Cognito emails your users by using its built-in email functionality or your Amazon SES email configuration. Specify one of the following values:
When Amazon Cognito emails your users, it uses its built-in email functionality. When you use the default option, Amazon Cognito allows only a limited number of emails each day for your user pool. For typical production environments, the default email limit is below the required delivery volume. To achieve a higher delivery volume, specify DEVELOPER to use your Amazon SES email configuration.
To look up the email delivery limit for the default option, see Limits in Amazon Cognito in the Amazon Cognito Developer Guide.
The default FROM address is no-reply@verificationemail.com. To customize the FROM address, provide the ARN of an Amazon SES verified email address for the SourceArn
parameter.
When Amazon Cognito emails your users, it uses your Amazon SES configuration. Amazon Cognito calls Amazon SES on your behalf to send email from your verified email address. When you use this option, the email delivery limits are the same limits that apply to your Amazon SES verified email address in your AWS account.
If you use this option, you must provide the ARN of an Amazon SES verified email address for the SourceArn
parameter.
Before Amazon Cognito can email your users, it requires additional permissions to call Amazon SES on your behalf. When you update your user pool with this option, Amazon Cognito creates a service-linked role, which is a type of IAM role, in your AWS account. This role contains the permissions that allow Amazon Cognito to access Amazon SES and send email messages with your address. For more information about the service-linked role that Amazon Cognito creates, see Using Service-Linked Roles for Amazon Cognito in the Amazon Cognito Developer Guide.
Specifies whether Amazon Cognito emails your users by using its built-in email functionality or your Amazon SES email configuration. Specify one of the following values:
When Amazon Cognito emails your users, it uses its built-in email functionality. When you use the default option, Amazon Cognito allows only a limited number of emails each day for your user pool. For typical production environments, the default email limit is below the required delivery volume. To achieve a higher delivery volume, specify DEVELOPER to use your Amazon SES email configuration.
To look up the email delivery limit for the default option, see Limits in Amazon Cognito in the Amazon Cognito Developer Guide.
The default FROM address is no-reply@verificationemail.com. To customize the FROM address, provide the ARN of an Amazon SES verified email address for the SourceArn
parameter.
If EmailSendingAccount is COGNITO_DEFAULT, the following parameters aren't allowed:
EmailVerificationMessage
EmailVerificationSubject
InviteMessageTemplate.EmailMessage
InviteMessageTemplate.EmailSubject
VerificationMessageTemplate.EmailMessage
VerificationMessageTemplate.EmailMessageByLink
VerificationMessageTemplate.EmailSubject,
VerificationMessageTemplate.EmailSubjectByLink
DEVELOPER EmailSendingAccount is required.
When Amazon Cognito emails your users, it uses your Amazon SES configuration. Amazon Cognito calls Amazon SES on your behalf to send email from your verified email address. When you use this option, the email delivery limits are the same limits that apply to your Amazon SES verified email address in your AWS account.
If you use this option, you must provide the ARN of an Amazon SES verified email address for the SourceArn
parameter.
Before Amazon Cognito can email your users, it requires additional permissions to call Amazon SES on your behalf. When you update your user pool with this option, Amazon Cognito creates a service-linked role, which is a type of IAM role, in your AWS account. This role contains the permissions that allow Amazon Cognito to access Amazon SES and send email messages with your address. For more information about the service-linked role that Amazon Cognito creates, see Using Service-Linked Roles for Amazon Cognito in the Amazon Cognito Developer Guide.
The email message template for sending a confirmation link to the user.
" + "VerificationMessageTemplateType$EmailMessageByLink": "The email message template for sending a confirmation link to the user. EmailMessageByLink is allowed only if EmailSendingAccount is DEVELOPER.
" } }, "EmailVerificationMessageType": { "base": null, "refs": { - "CreateUserPoolRequest$EmailVerificationMessage": "A string representing the email verification message.
", - "MessageTemplateType$EmailMessage": "The message template for email messages.
", + "CreateUserPoolRequest$EmailVerificationMessage": "A string representing the email verification message. EmailVerificationMessage is allowed only if EmailSendingAccount is DEVELOPER.
", + "MessageTemplateType$EmailMessage": "The message template for email messages. EmailMessage is allowed only if EmailSendingAccount is DEVELOPER.
", "UpdateUserPoolRequest$EmailVerificationMessage": "The contents of the email verification message.
", "UserPoolType$EmailVerificationMessage": "The contents of the email verification message.
", - "VerificationMessageTemplateType$EmailMessage": "The email message template.
" + "VerificationMessageTemplateType$EmailMessage": "The email message template. EmailMessage is allowed only if EmailSendingAccount is DEVELOPER.
" } }, "EmailVerificationSubjectByLinkType": { "base": null, "refs": { - "VerificationMessageTemplateType$EmailSubjectByLink": "The subject line for the email message template for sending a confirmation link to the user.
" + "VerificationMessageTemplateType$EmailSubjectByLink": "The subject line for the email message template for sending a confirmation link to the user. EmailSubjectByLink is allowed only EmailSendingAccount is DEVELOPER.
" } }, "EmailVerificationSubjectType": { "base": null, "refs": { - "CreateUserPoolRequest$EmailVerificationSubject": "A string representing the email verification subject.
", - "MessageTemplateType$EmailSubject": "The subject line for email messages.
", + "CreateUserPoolRequest$EmailVerificationSubject": "A string representing the email verification subject. EmailVerificationSubject is allowed only if EmailSendingAccount is DEVELOPER.
", + "MessageTemplateType$EmailSubject": "The subject line for email messages. EmailSubject is allowed only if EmailSendingAccount is DEVELOPER.
", "UpdateUserPoolRequest$EmailVerificationSubject": "The subject of the email verification message.
", "UserPoolType$EmailVerificationSubject": "The subject of the email verification message.
", - "VerificationMessageTemplateType$EmailSubject": "The subject line for the email message template.
" + "VerificationMessageTemplateType$EmailSubject": "The subject line for the email message template. EmailSubject is allowed only if EmailSendingAccount is DEVELOPER.
" } }, "EnableSoftwareTokenMFAException": { @@ -2295,7 +2322,7 @@ } }, "SMSMfaSettingsType": { - "base": "The type used for enabling SMS MFA at the user level.
", + "base": "The type used for enabling SMS MFA at the user level. Phone numbers don't need to be verified to be used for SMS MFA. If an MFA type is enabled for a user, the user will be prompted for MFA during all sign in attempts, unless device tracking is turned on and the device has been trusted. If you would like MFA to be applied selectively based on the assessed risk level of sign in attempts, disable MFA for users and turn on Adaptive Authentication for the user pool.
", "refs": { "AdminSetUserMFAPreferenceRequest$SMSMfaSettings": "The SMS text message MFA settings.
", "SetUserMFAPreferenceRequest$SMSMfaSettings": "The SMS text message multi-factor authentication (MFA) settings.
" @@ -2499,7 +2526,7 @@ } }, "SoftwareTokenMfaSettingsType": { - "base": "The type used for enabling software token MFA at the user level.
", + "base": "The type used for enabling software token MFA at the user level. If an MFA type is enabled for a user, the user will be prompted for MFA during all sign in attempts, unless device tracking is turned on and the device has been trusted. If you would like MFA to be applied selectively based on the assessed risk level of sign in attempts, disable MFA for users and turn on Adaptive Authentication for the user pool.
", "refs": { "AdminSetUserMFAPreferenceRequest$SoftwareTokenMfaSettings": "The time-based one-time password software token MFA settings.
", "SetUserMFAPreferenceRequest$SoftwareTokenMfaSettings": "The time-based one-time password software token MFA settings.
" diff --git a/models/apis/comprehend/2017-11-27/api-2.json b/models/apis/comprehend/2017-11-27/api-2.json index 9da354d64c4..78cc2066d73 100644 --- a/models/apis/comprehend/2017-11-27/api-2.json +++ b/models/apis/comprehend/2017-11-27/api-2.json @@ -304,6 +304,21 @@ {"shape":"InternalServerException"} ] }, + "DescribeEventsDetectionJob":{ + "name":"DescribeEventsDetectionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventsDetectionJobRequest"}, + "output":{"shape":"DescribeEventsDetectionJobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"JobNotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServerException"} + ] + }, "DescribeKeyPhrasesDetectionJob":{ "name":"DescribeKeyPhrasesDetectionJob", "http":{ @@ -543,6 +558,21 @@ {"shape":"InternalServerException"} ] }, + "ListEventsDetectionJobs":{ + "name":"ListEventsDetectionJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListEventsDetectionJobsRequest"}, + "output":{"shape":"ListEventsDetectionJobsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InvalidFilterException"}, + {"shape":"InternalServerException"} + ] + }, "ListKeyPhrasesDetectionJobs":{ "name":"ListKeyPhrasesDetectionJobs", "http":{ @@ -666,6 +696,21 @@ {"shape":"InternalServerException"} ] }, + "StartEventsDetectionJob":{ + "name":"StartEventsDetectionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartEventsDetectionJobRequest"}, + "output":{"shape":"StartEventsDetectionJobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"KmsKeyValidationException"}, + {"shape":"InternalServerException"} + ] + }, "StartKeyPhrasesDetectionJob":{ "name":"StartKeyPhrasesDetectionJob", "http":{ @@ -754,6 +799,20 @@ {"shape":"InternalServerException"} ] }, + "StopEventsDetectionJob":{ + "name":"StopEventsDetectionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopEventsDetectionJobRequest"}, + "output":{"shape":"StopEventsDetectionJobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"JobNotFoundException"}, + {"shape":"InternalServerException"} + ] + }, "StopKeyPhrasesDetectionJob":{ "name":"StopKeyPhrasesDetectionJob", "http":{ @@ -1352,6 +1411,19 @@ "EntityRecognizerProperties":{"shape":"EntityRecognizerProperties"} } }, + "DescribeEventsDetectionJobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{"shape":"JobId"} + } + }, + "DescribeEventsDetectionJobResponse":{ + "type":"structure", + "members":{ + "EventsDetectionJobProperties":{"shape":"EventsDetectionJobProperties"} + } + }, "DescribeKeyPhrasesDetectionJobRequest":{ "type":"structure", "required":["JobId"], @@ -1892,6 +1964,41 @@ "Type":{"shape":"EntityTypeName"} } }, + "EventTypeString":{ + "type":"string", + "max":40, + "min":1, + "pattern":"[A-Z_]*" + }, + "EventsDetectionJobFilter":{ + "type":"structure", + "members":{ + "JobName":{"shape":"JobName"}, + "JobStatus":{"shape":"JobStatus"}, + "SubmitTimeBefore":{"shape":"Timestamp"}, + "SubmitTimeAfter":{"shape":"Timestamp"} + } + }, + "EventsDetectionJobProperties":{ + "type":"structure", + "members":{ + "JobId":{"shape":"JobId"}, + "JobName":{"shape":"JobName"}, + "JobStatus":{"shape":"JobStatus"}, + "Message":{"shape":"AnyLengthString"}, + "SubmitTime":{"shape":"Timestamp"}, + 
"EndTime":{"shape":"Timestamp"}, + "InputDataConfig":{"shape":"InputDataConfig"}, + "OutputDataConfig":{"shape":"OutputDataConfig"}, + "LanguageCode":{"shape":"LanguageCode"}, + "DataAccessRoleArn":{"shape":"IamRoleArn"}, + "TargetEventTypes":{"shape":"TargetEventTypes"} + } + }, + "EventsDetectionJobPropertiesList":{ + "type":"list", + "member":{"shape":"EventsDetectionJobProperties"} + }, "Float":{"type":"float"}, "IamRoleArn":{ "type":"string", @@ -2134,6 +2241,21 @@ "NextToken":{"shape":"String"} } }, + "ListEventsDetectionJobsRequest":{ + "type":"structure", + "members":{ + "Filter":{"shape":"EventsDetectionJobFilter"}, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"MaxResultsInteger"} + } + }, + "ListEventsDetectionJobsResponse":{ + "type":"structure", + "members":{ + "EventsDetectionJobPropertiesList":{"shape":"EventsDetectionJobPropertiesList"}, + "NextToken":{"shape":"String"} + } + }, "ListKeyPhrasesDetectionJobsRequest":{ "type":"structure", "members":{ @@ -2602,6 +2724,35 @@ "JobStatus":{"shape":"JobStatus"} } }, + "StartEventsDetectionJobRequest":{ + "type":"structure", + "required":[ + "InputDataConfig", + "OutputDataConfig", + "DataAccessRoleArn", + "LanguageCode", + "TargetEventTypes" + ], + "members":{ + "InputDataConfig":{"shape":"InputDataConfig"}, + "OutputDataConfig":{"shape":"OutputDataConfig"}, + "DataAccessRoleArn":{"shape":"IamRoleArn"}, + "JobName":{"shape":"JobName"}, + "LanguageCode":{"shape":"LanguageCode"}, + "ClientRequestToken":{ + "shape":"ClientRequestTokenString", + "idempotencyToken":true + }, + "TargetEventTypes":{"shape":"TargetEventTypes"} + } + }, + "StartEventsDetectionJobResponse":{ + "type":"structure", + "members":{ + "JobId":{"shape":"JobId"}, + "JobStatus":{"shape":"JobStatus"} + } + }, "StartKeyPhrasesDetectionJobRequest":{ "type":"structure", "required":[ @@ -2746,6 +2897,20 @@ "JobStatus":{"shape":"JobStatus"} } }, + "StopEventsDetectionJobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{"shape":"JobId"} + } + }, + "StopEventsDetectionJobResponse":{ + "type":"structure", + "members":{ + "JobId":{"shape":"JobId"}, + "JobStatus":{"shape":"JobStatus"} + } + }, "StopKeyPhrasesDetectionJobRequest":{ "type":"structure", "required":["JobId"], @@ -2891,6 +3056,11 @@ "max":256, "min":0 }, + "TargetEventTypes":{ + "type":"list", + "member":{"shape":"EventTypeString"}, + "min":1 + }, "TextSizeLimitExceededException":{ "type":"structure", "members":{ diff --git a/models/apis/comprehend/2017-11-27/docs-2.json b/models/apis/comprehend/2017-11-27/docs-2.json index 326ae27dfdd..64bda877e7e 100644 --- a/models/apis/comprehend/2017-11-27/docs-2.json +++ b/models/apis/comprehend/2017-11-27/docs-2.json @@ -20,6 +20,7 @@ "DescribeEndpoint": "Gets the properties associated with a specific endpoint. Use this operation to get the status of an endpoint.
", "DescribeEntitiesDetectionJob": "Gets the properties associated with an entities detection job. Use this operation to get the status of a detection job.
", "DescribeEntityRecognizer": "Provides details about an entity recognizer including status, S3 buckets containing training data, recognizer metadata, metrics, and so on.
", + "DescribeEventsDetectionJob": "Gets the status and details of an events detection job.
", "DescribeKeyPhrasesDetectionJob": "Gets the properties associated with a key phrases detection job. Use this operation to get the status of a detection job.
", "DescribePiiEntitiesDetectionJob": "Gets the properties associated with a PII entities detection job. For example, you can use this operation to get the job status.
", "DescribeSentimentDetectionJob": "Gets the properties associated with a sentiment detection job. Use this operation to get the status of a detection job.
", @@ -36,6 +37,7 @@ "ListEndpoints": "Gets a list of all existing endpoints that you've created.
", "ListEntitiesDetectionJobs": "Gets a list of the entity detection jobs that you have submitted.
", "ListEntityRecognizers": "Gets a list of the properties of all entity recognizers that you created, including recognizers currently in training. Allows you to filter the list of recognizers based on criteria such as status and submission time. This call returns up to 500 entity recognizers in the list, with a default number of 100 recognizers in the list.
The results of this list are not in any particular order. Please get the list and sort locally if needed.
", + "ListEventsDetectionJobs": "Gets a list of the events detection jobs that you have submitted.
", "ListKeyPhrasesDetectionJobs": "Get a list of key phrase detection jobs that you have submitted.
", "ListPiiEntitiesDetectionJobs": "Gets a list of the PII entity detection jobs that you have submitted.
", "ListSentimentDetectionJobs": "Gets a list of sentiment detection jobs that you have submitted.
", @@ -44,12 +46,14 @@ "StartDocumentClassificationJob": "Starts an asynchronous document classification job. Use the operation to track the progress of the job.
", "StartDominantLanguageDetectionJob": "Starts an asynchronous dominant language detection job for a collection of documents. Use the operation to track the status of a job.
", "StartEntitiesDetectionJob": "Starts an asynchronous entity detection job for a collection of documents. Use the operation to track the status of a job.
This API can be used for either standard entity detection or custom entity recognition. In order to be used for custom entity recognition, the optional EntityRecognizerArn
must be used in order to provide access to the recognizer being used to detect the custom entity.
", + "StartEventsDetectionJob": "Starts an asynchronous event detection job for a collection of documents.
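A sketch of starting an events detection job with the shapes added in this release (StartEventsDetectionJobInput, TargetEventTypes). The bucket names, role ARN, job name, and event types are illustrative assumptions; TargetEventTypes is required (min 1) and each value must match the EventTypeString pattern [A-Z_]*:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/comprehend"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	client := comprehend.New(sess)

	out, err := client.StartEventsDetectionJob(&comprehend.StartEventsDetectionJobInput{
		JobName:           aws.String("my-events-job"), // hypothetical name
		LanguageCode:      aws.String("en"),
		DataAccessRoleArn: aws.String("arn:aws:iam::123456789012:role/ComprehendDataAccess"), // hypothetical role
		InputDataConfig: &comprehend.InputDataConfig{
			S3Uri:       aws.String("s3://my-input-bucket/docs/"), // hypothetical bucket
			InputFormat: aws.String("ONE_DOC_PER_LINE"),
		},
		OutputDataConfig: &comprehend.OutputDataConfig{
			S3Uri: aws.String("s3://my-output-bucket/events/"), // hypothetical bucket
		},
		// Illustrative event types; consult the service documentation for the
		// supported values.
		TargetEventTypes: aws.StringSlice([]string{"BANKRUPTCY", "EMPLOYMENT"}),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.JobId), aws.StringValue(out.JobStatus))
}
```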
", "StartKeyPhrasesDetectionJob": "Starts an asynchronous key phrase detection job for a collection of documents. Use the operation to track the status of a job.
", "StartPiiEntitiesDetectionJob": "Starts an asynchronous PII entity detection job for a collection of documents.
", "StartSentimentDetectionJob": "Starts an asynchronous sentiment detection job for a collection of documents. use the operation to track the status of a job.
", "StartTopicsDetectionJob": "Starts an asynchronous topic detection job. Use the DescribeTopicDetectionJob
operation to track the status of a job.
", "StopDominantLanguageDetectionJob": "Stops a dominant language detection job in progress.
If the job state is IN_PROGRESS
the job is marked for termination and put into the STOP_REQUESTED
state. If the job completes before it can be stopped, it is put into the COMPLETED
state; otherwise the job is stopped and put into the STOPPED
state.
If the job is in the COMPLETED
or FAILED
state when you call the StopDominantLanguageDetectionJob
operation, the operation returns a 400 Internal Request Exception.
When a job is stopped, any documents already processed are written to the output location.
", "StopEntitiesDetectionJob": "Stops an entities detection job in progress.
If the job state is IN_PROGRESS
the job is marked for termination and put into the STOP_REQUESTED
state. If the job completes before it can be stopped, it is put into the COMPLETED
state; otherwise the job is stopped and put into the STOPPED
state.
If the job is in the COMPLETED
or FAILED
state when you call the StopEntitiesDetectionJob
operation, the operation returns a 400 Internal Request Exception.
When a job is stopped, any documents already processed are written to the output location.
", + "StopEventsDetectionJob": "Stops an events detection job in progress.
", "StopKeyPhrasesDetectionJob": "Stops a key phrases detection job in progress.
If the job state is IN_PROGRESS
the job is marked for termination and put into the STOP_REQUESTED
state. If the job completes before it can be stopped, it is put into the COMPLETED
state; otherwise the job is stopped and put into the STOPPED
state.
If the job is in the COMPLETED
or FAILED
state when you call the StopKeyPhrasesDetectionJob
operation, the operation returns a 400 Internal Request Exception.
When a job is stopped, any documents already processed are written to the output location.
", "StopPiiEntitiesDetectionJob": "Stops a PII entities detection job in progress.
", "StopSentimentDetectionJob": "Stops a sentiment detection job in progress.
If the job state is IN_PROGRESS
the job is marked for termination and put into the STOP_REQUESTED
state. If the job completes before it can be stopped, it is put into the COMPLETED
state; otherwise the job is stopped and put into the STOPPED
state.
If the job is in the COMPLETED
or FAILED
state when you call the StopSentimentDetectionJob
operation, the operation returns a 400 Internal Request Exception.
When a job is stopped, any documents already processed are written to the output location.
", @@ -70,6 +74,7 @@ "EntitiesDetectionJobProperties$Message": "A description of the status of a job.
", "EntityRecognizerMetadataEntityTypesListItem$Type": "Type of entity from the list of entity types in the metadata of an entity recognizer.
", "EntityRecognizerProperties$Message": "A description of the status of the recognizer.
", + "EventsDetectionJobProperties$Message": "A description of the status of the events detection job.
", "KeyPhrasesDetectionJobProperties$Message": "A description of the status of a job.
", "PiiEntitiesDetectionJobProperties$Message": "A description of the status of a job.
", "SentimentDetectionJobProperties$Message": "A description of the status of a job.
", @@ -227,6 +232,7 @@ "StartDocumentClassificationJobRequest$ClientRequestToken": "A unique identifier for the request. If you do not set the client request token, Amazon Comprehend generates one.
", "StartDominantLanguageDetectionJobRequest$ClientRequestToken": "A unique identifier for the request. If you do not set the client request token, Amazon Comprehend generates one.
", "StartEntitiesDetectionJobRequest$ClientRequestToken": "A unique identifier for the request. If you don't set the client request token, Amazon Comprehend generates one.
", + "StartEventsDetectionJobRequest$ClientRequestToken": "An unique identifier for the request. If you don't set the client request token, Amazon Comprehend generates one.
", "StartKeyPhrasesDetectionJobRequest$ClientRequestToken": "A unique identifier for the request. If you don't set the client request token, Amazon Comprehend generates one.
", "StartPiiEntitiesDetectionJobRequest$ClientRequestToken": "A unique identifier for the request. If you don't set the client request token, Amazon Comprehend generates one.
", "StartSentimentDetectionJobRequest$ClientRequestToken": "A unique identifier for the request. If you don't set the client request token, Amazon Comprehend generates one.
", @@ -420,6 +426,16 @@ "refs": { } }, + "DescribeEventsDetectionJobRequest": { + "base": null, + "refs": { + } + }, + "DescribeEventsDetectionJobResponse": { + "base": null, + "refs": { + } + }, "DescribeKeyPhrasesDetectionJobRequest": { "base": null, "refs": { @@ -662,11 +678,11 @@ "EntityRecognizerEvaluationMetrics$F1Score": "A measure of how accurate the recognizer results are for the test data. It is derived from the Precision
and Recall
values. The F1Score
is the harmonic average of the two scores. The highest score is 1, and the worst score is 0.
", "EntityTypesEvaluationMetrics$Precision": "A measure of the usefulness of the recognizer results for a specific entity type in the test data. High precision means that the recognizer returned substantially more relevant results than irrelevant ones.
", "EntityTypesEvaluationMetrics$Recall": "A measure of how complete the recognizer results are for a specific entity type in the test data. High recall means that the recognizer returned most of the relevant results.
", - "EntityTypesEvaluationMetrics$F1Score": "A measure of how accurate the recognizer results are for for a specific entity type in the test data. It is derived from the Precision
and Recall
values. The F1Score
is the harmonic average of the two scores. The highest score is 1, and the worst score is 0.
", + "EntityTypesEvaluationMetrics$F1Score": "A measure of how accurate the recognizer results are for a specific entity type in the test data. It is derived from the Precision
and Recall
values. The F1Score
is the harmonic average of the two scores. The highest score is 1, and the worst score is 0.
" } }, "EndpointFilter": { - "base": "The filter used to determine which endpoints are are returned. You can filter jobs on their name, model, status, or the date and time that they were created. You can only set one filter at a time.
", + "base": "The filter used to determine which endpoints are returned. You can filter jobs on their name, model, status, or the date and time that they were created. You can only set one filter at a time.
", "refs": { "ListEndpointsRequest$Filter": "Filters the endpoints that are returned. You can filter endpoints on their name, model, status, or the date and time that they were created. You can only set one filter at a time.
" } @@ -844,6 +860,31 @@ "EntityTypesList$member": null } }, + "EventTypeString": { + "base": null, + "refs": { + "TargetEventTypes$member": null + } + }, + "EventsDetectionJobFilter": { + "base": "Provides information for filtering a list of event detection jobs.
", + "refs": { + "ListEventsDetectionJobsRequest$Filter": "Filters the jobs that are returned. You can filter jobs on their name, status, or the date and time that they were submitted. You can only set one filter at a time.
" + } + }, + "EventsDetectionJobProperties": { + "base": "Provides information about an events detection job.
", + "refs": { + "DescribeEventsDetectionJobResponse$EventsDetectionJobProperties": "An object that contains the properties associated with an event detection job.
", + "EventsDetectionJobPropertiesList$member": null + } + }, + "EventsDetectionJobPropertiesList": { + "base": null, + "refs": { + "ListEventsDetectionJobsResponse$EventsDetectionJobPropertiesList": "A list containing the properties of each job that is returned.
" + } + }, "Float": { "base": null, "refs": { @@ -870,12 +911,14 @@ "DominantLanguageDetectionJobProperties$DataAccessRoleArn": "The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input data.
", "EntitiesDetectionJobProperties$DataAccessRoleArn": "The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input data.
", "EntityRecognizerProperties$DataAccessRoleArn": "The Amazon Resource Name (ARN) of the AWS Identity and Management (IAM) role that grants Amazon Comprehend read access to your input data.
", + "EventsDetectionJobProperties$DataAccessRoleArn": "The Amazon Resource Name (ARN) of the AWS Identify and Access Management (IAM) role that grants Amazon Comprehend read access to your input data.
", "KeyPhrasesDetectionJobProperties$DataAccessRoleArn": "The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input data.
", "PiiEntitiesDetectionJobProperties$DataAccessRoleArn": "The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input data.
", "SentimentDetectionJobProperties$DataAccessRoleArn": "The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input data.
", "StartDocumentClassificationJobRequest$DataAccessRoleArn": "The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data.
", "StartDominantLanguageDetectionJobRequest$DataAccessRoleArn": "The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data. For more information, see https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions.
", "StartEntitiesDetectionJobRequest$DataAccessRoleArn": "The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data. For more information, see https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions.
", + "StartEventsDetectionJobRequest$DataAccessRoleArn": "The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data.
", "StartKeyPhrasesDetectionJobRequest$DataAccessRoleArn": "The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data. For more information, see https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions.
", "StartPiiEntitiesDetectionJobRequest$DataAccessRoleArn": "The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data.
", "StartSentimentDetectionJobRequest$DataAccessRoleArn": "The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data. For more information, see https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions.
", @@ -898,12 +941,14 @@ "DocumentClassificationJobProperties$InputDataConfig": "The input data configuration that you supplied when you created the document classification job.
", "DominantLanguageDetectionJobProperties$InputDataConfig": "The input data configuration that you supplied when you created the dominant language detection job.
", "EntitiesDetectionJobProperties$InputDataConfig": "The input data configuration that you supplied when you created the entities detection job.
", + "EventsDetectionJobProperties$InputDataConfig": "The input data configuration that you supplied when you created the events detection job.
", "KeyPhrasesDetectionJobProperties$InputDataConfig": "The input data configuration that you supplied when you created the key phrases detection job.
", "PiiEntitiesDetectionJobProperties$InputDataConfig": "The input properties for a PII entities detection job.
", "SentimentDetectionJobProperties$InputDataConfig": "The input data configuration that you supplied when you created the sentiment detection job.
", "StartDocumentClassificationJobRequest$InputDataConfig": "Specifies the format and location of the input data for the job.
", "StartDominantLanguageDetectionJobRequest$InputDataConfig": "Specifies the format and location of the input data for the job.
", "StartEntitiesDetectionJobRequest$InputDataConfig": "Specifies the format and location of the input data for the job.
", + "StartEventsDetectionJobRequest$InputDataConfig": "Specifies the format and location of the input data for the job.
", "StartKeyPhrasesDetectionJobRequest$InputDataConfig": "Specifies the format and location of the input data for the job.
", "StartPiiEntitiesDetectionJobRequest$InputDataConfig": "The input properties for a PII entities detection job.
", "StartSentimentDetectionJobRequest$InputDataConfig": "Specifies the format and location of the input data for the job.
", @@ -965,6 +1010,7 @@ "DescribeDocumentClassificationJobRequest$JobId": "The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its response.
", "DescribeDominantLanguageDetectionJobRequest$JobId": "The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its response.
", "DescribeEntitiesDetectionJobRequest$JobId": "The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its response.
", + "DescribeEventsDetectionJobRequest$JobId": "The identifier of the events detection job.
", "DescribeKeyPhrasesDetectionJobRequest$JobId": "The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its response.
", "DescribePiiEntitiesDetectionJobRequest$JobId": "The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its response.
", "DescribeSentimentDetectionJobRequest$JobId": "The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its response.
", @@ -972,12 +1018,14 @@ "DocumentClassificationJobProperties$JobId": "The identifier assigned to the document classification job.
", "DominantLanguageDetectionJobProperties$JobId": "The identifier assigned to the dominant language detection job.
", "EntitiesDetectionJobProperties$JobId": "The identifier assigned to the entities detection job.
", + "EventsDetectionJobProperties$JobId": "The identifier assigned to the events detection job.
", "KeyPhrasesDetectionJobProperties$JobId": "The identifier assigned to the key phrases detection job.
", "PiiEntitiesDetectionJobProperties$JobId": "The identifier assigned to the PII entities detection job.
", "SentimentDetectionJobProperties$JobId": "The identifier assigned to the sentiment detection job.
", "StartDocumentClassificationJobResponse$JobId": "The identifier generated for the job. To get the status of the job, use this identifier with the operation.
", "StartDominantLanguageDetectionJobResponse$JobId": "The identifier generated for the job. To get the status of a job, use this identifier with the operation.
", "StartEntitiesDetectionJobResponse$JobId": "The identifier generated for the job. To get the status of job, use this identifier with the operation.
", + "StartEventsDetectionJobResponse$JobId": "An unique identifier for the request. If you don't set the client request token, Amazon Comprehend generates one.
", "StartKeyPhrasesDetectionJobResponse$JobId": "The identifier generated for the job. To get the status of a job, use this identifier with the operation.
", "StartPiiEntitiesDetectionJobResponse$JobId": "The identifier generated for the job.
", "StartSentimentDetectionJobResponse$JobId": "The identifier generated for the job. To get the status of a job, use this identifier with the operation.
", @@ -986,6 +1034,8 @@ "StopDominantLanguageDetectionJobResponse$JobId": "The identifier of the dominant language detection job to stop.
", "StopEntitiesDetectionJobRequest$JobId": "The identifier of the entities detection job to stop.
", "StopEntitiesDetectionJobResponse$JobId": "The identifier of the entities detection job to stop.
", + "StopEventsDetectionJobRequest$JobId": "The identifier of the events detection job to stop.
", + "StopEventsDetectionJobResponse$JobId": "The identifier of the events detection job to stop.
", "StopKeyPhrasesDetectionJobRequest$JobId": "The identifier of the key phrases detection job to stop.
", "StopKeyPhrasesDetectionJobResponse$JobId": "The identifier of the key phrases detection job to stop.
", "StopPiiEntitiesDetectionJobRequest$JobId": "The identifier of the PII entities detection job to stop.
", @@ -1004,6 +1054,8 @@ "DominantLanguageDetectionJobProperties$JobName": "The name that you assigned to the dominant language detection job.
", "EntitiesDetectionJobFilter$JobName": "Filters on the name of the job.
", "EntitiesDetectionJobProperties$JobName": "The name that you assigned the entities detection job.
", + "EventsDetectionJobFilter$JobName": "Filters on the name of the events detection job.
", + "EventsDetectionJobProperties$JobName": "The name you assigned the events detection job.
", "KeyPhrasesDetectionJobFilter$JobName": "Filters on the name of the job.
", "KeyPhrasesDetectionJobProperties$JobName": "The name that you assigned the key phrases detection job.
", "PiiEntitiesDetectionJobFilter$JobName": "Filters on the name of the job.
", @@ -1013,6 +1065,7 @@ "StartDocumentClassificationJobRequest$JobName": "The identifier of the job.
", "StartDominantLanguageDetectionJobRequest$JobName": "An identifier for the job.
", "StartEntitiesDetectionJobRequest$JobName": "The identifier of the job.
", + "StartEventsDetectionJobRequest$JobName": "The identifier of the events detection job.
", "StartKeyPhrasesDetectionJobRequest$JobName": "The identifier of the job.
", "StartPiiEntitiesDetectionJobRequest$JobName": "The identifier of the job.
", "StartSentimentDetectionJobRequest$JobName": "The identifier of the job.
", @@ -1035,6 +1088,8 @@ "DominantLanguageDetectionJobProperties$JobStatus": "The current status of the dominant language detection job. If the status is FAILED
, the Message
field shows the reason for the failure.
", "EntitiesDetectionJobFilter$JobStatus": "Filters the list of jobs based on job status. Returns only jobs with the specified status.
", "EntitiesDetectionJobProperties$JobStatus": "The current status of the entities detection job. If the status is FAILED
, the Message
field shows the reason for the failure.
", + "EventsDetectionJobFilter$JobStatus": "Filters the list of jobs based on job status. Returns only jobs with the specified status.
", + "EventsDetectionJobProperties$JobStatus": "The current status of the events detection job.
", "KeyPhrasesDetectionJobFilter$JobStatus": "Filters the list of jobs based on job status. Returns only jobs with the specified status.
", "KeyPhrasesDetectionJobProperties$JobStatus": "The current status of the key phrases detection job. If the status is FAILED
, the Message
field shows the reason for the failure.
", "PiiEntitiesDetectionJobFilter$JobStatus": "Filters the list of jobs based on job status. Returns only jobs with the specified status.
", @@ -1044,12 +1099,14 @@ "StartDocumentClassificationJobResponse$JobStatus": "The status of the job:
SUBMITTED - The job has been received and queued for processing.
IN_PROGRESS - Amazon Comprehend is processing the job.
COMPLETED - The job was successfully completed and the output is available.
FAILED - The job did not complete. For details, use the operation.
STOP_REQUESTED - Amazon Comprehend has received a stop request for the job and is processing the request.
STOPPED - The job was successfully stopped without completing.
", "StartDominantLanguageDetectionJobResponse$JobStatus": "The status of the job.
SUBMITTED - The job has been received and is queued for processing.
IN_PROGRESS - Amazon Comprehend is processing the job.
COMPLETED - The job was successfully completed and the output is available.
FAILED - The job did not complete. To get details, use the operation.
", "StartEntitiesDetectionJobResponse$JobStatus": "The status of the job.
SUBMITTED - The job has been received and is queued for processing.
IN_PROGRESS - Amazon Comprehend is processing the job.
COMPLETED - The job was successfully completed and the output is available.
FAILED - The job did not complete. To get details, use the operation.
STOP_REQUESTED - Amazon Comprehend has received a stop request for the job and is processing the request.
STOPPED - The job was successfully stopped without completing.
", + "StartEventsDetectionJobResponse$JobStatus": "The status of the events detection job.
", "StartKeyPhrasesDetectionJobResponse$JobStatus": "The status of the job.
SUBMITTED - The job has been received and is queued for processing.
IN_PROGRESS - Amazon Comprehend is processing the job.
COMPLETED - The job was successfully completed and the output is available.
FAILED - The job did not complete. To get details, use the operation.
", "StartPiiEntitiesDetectionJobResponse$JobStatus": "The status of the job.
", "StartSentimentDetectionJobResponse$JobStatus": "The status of the job.
SUBMITTED - The job has been received and is queued for processing.
IN_PROGRESS - Amazon Comprehend is processing the job.
COMPLETED - The job was successfully completed and the output is available.
FAILED - The job did not complete. To get details, use the operation.
", "StartTopicsDetectionJobResponse$JobStatus": "The status of the job:
SUBMITTED - The job has been received and is queued for processing.
IN_PROGRESS - Amazon Comprehend is processing the job.
COMPLETED - The job was successfully completed and the output is available.
FAILED - The job did not complete. To get details, use the DescribeTopicDetectionJob
operation.
", "StopDominantLanguageDetectionJobResponse$JobStatus": "Either STOP_REQUESTED
if the job is currently running, or STOPPED
if the job was previously stopped with the StopDominantLanguageDetectionJob
operation.
", "StopEntitiesDetectionJobResponse$JobStatus": "Either STOP_REQUESTED
if the job is currently running, or STOPPED
if the job was previously stopped with the StopEntitiesDetectionJob
operation.
", + "StopEventsDetectionJobResponse$JobStatus": "The status of the events detection job.
", "StopKeyPhrasesDetectionJobResponse$JobStatus": "Either STOP_REQUESTED
if the job is currently running, or STOPPED
if the job was previously stopped with the StopKeyPhrasesDetectionJob
operation.
", "StopPiiEntitiesDetectionJobResponse$JobStatus": "The status of the PII entities detection job.
", "StopSentimentDetectionJobResponse$JobStatus": "Either STOP_REQUESTED
if the job is currently running, or STOPPED
if the job was previously stopped with the StopSentimentDetectionJob
operation.
", "DocumentClassifierProperties$LanguageCode": "The language code for the language of the documents that the classifier was trained on.
", "EntitiesDetectionJobProperties$LanguageCode": "The language code of the input documents.
", "EntityRecognizerProperties$LanguageCode": "The language of the input documents. All documents must be in the same language. Only English (\"en\") is currently supported.
", + "EventsDetectionJobProperties$LanguageCode": "The language code of the input documents.
", "KeyPhrasesDetectionJobProperties$LanguageCode": "The language code of the input documents.
", "PiiEntitiesDetectionJobProperties$LanguageCode": "The language code of the input documents
", "SentimentDetectionJobProperties$LanguageCode": "The language code of the input documents.
", "StartEntitiesDetectionJobRequest$LanguageCode": "The language of the input documents. All documents must be in the same language. You can specify any of the languages supported by Amazon Comprehend. If custom entities recognition is used, this parameter is ignored and the language used for training the model is used instead.
", + "StartEventsDetectionJobRequest$LanguageCode": "The language code of the input documents.
", "StartKeyPhrasesDetectionJobRequest$LanguageCode": "The language of the input documents. You can specify any of the primary languages supported by Amazon Comprehend. All documents must be in the same language.
", "StartPiiEntitiesDetectionJobRequest$LanguageCode": "The language of the input documents.
", "StartSentimentDetectionJobRequest$LanguageCode": "The language of the input documents. You can specify any of the primary languages supported by Amazon Comprehend. All documents must be in the same language.
" @@ -1201,6 +1260,16 @@ "refs": { } }, + "ListEventsDetectionJobsRequest": { + "base": null, + "refs": { + } + }, + "ListEventsDetectionJobsResponse": { + "base": null, + "refs": { + } + }, "ListKeyPhrasesDetectionJobsRequest": { "base": null, "refs": { @@ -1271,7 +1340,7 @@ "ListOfLabels": { "base": null, "refs": { - "ClassifyDocumentResponse$Labels": "The labels used the document being analyzed. These are used for multi-label trained models. Individual labels represent different categories that are related in some manner and are not multually exclusive. For example, a movie can be just an action movie, or it can be an action movie, a science fiction movie, and a comedy, all at the same time.
" + "ClassifyDocumentResponse$Labels": "The labels used the document being analyzed. These are used for multi-label trained models. Individual labels represent different categories that are related in some manner and are not mutually exclusive. For example, a movie can be just an action movie, or it can be an action movie, a science fiction movie, and a comedy, all at the same time.
" } }, "ListOfPiiEntities": { @@ -1348,6 +1417,7 @@ "ListEndpointsRequest$MaxResults": "The maximum number of results to return in each page. The default is 100.
", "ListEntitiesDetectionJobsRequest$MaxResults": "The maximum number of results to return in each page. The default is 100.
", "ListEntityRecognizersRequest$MaxResults": "The maximum number of results to return on each page. The default is 100.
", + "ListEventsDetectionJobsRequest$MaxResults": "The maximum number of results to return in each page.
", "ListKeyPhrasesDetectionJobsRequest$MaxResults": "The maximum number of results to return in each page. The default is 100.
", "ListPiiEntitiesDetectionJobsRequest$MaxResults": "The maximum number of results to return in each page.
", "ListSentimentDetectionJobsRequest$MaxResults": "The maximum number of results to return in each page. The default is 100.
", @@ -1375,11 +1445,13 @@ "DocumentClassificationJobProperties$OutputDataConfig": "The output data configuration that you supplied when you created the document classification job.
", "DominantLanguageDetectionJobProperties$OutputDataConfig": "The output data configuration that you supplied when you created the dominant language detection job.
", "EntitiesDetectionJobProperties$OutputDataConfig": "The output data configuration that you supplied when you created the entities detection job.
", + "EventsDetectionJobProperties$OutputDataConfig": "The output data configuration that you supplied when you created the events detection job.
", "KeyPhrasesDetectionJobProperties$OutputDataConfig": "The output data configuration that you supplied when you created the key phrases detection job.
", "SentimentDetectionJobProperties$OutputDataConfig": "The output data configuration that you supplied when you created the sentiment detection job.
", "StartDocumentClassificationJobRequest$OutputDataConfig": "Specifies where to send the output files.
", "StartDominantLanguageDetectionJobRequest$OutputDataConfig": "Specifies where to send the output files.
", "StartEntitiesDetectionJobRequest$OutputDataConfig": "Specifies where to send the output files.
", + "StartEventsDetectionJobRequest$OutputDataConfig": "Specifies where to send the output files.
", "StartKeyPhrasesDetectionJobRequest$OutputDataConfig": "Specifies where to send the output files.
", "StartPiiEntitiesDetectionJobRequest$OutputDataConfig": "Provides configuration parameters for the output of PII entity detection jobs.
", "StartSentimentDetectionJobRequest$OutputDataConfig": "Specifies where to send the output files.
", @@ -1566,6 +1638,16 @@ "refs": { } }, + "StartEventsDetectionJobRequest": { + "base": null, + "refs": { + } + }, + "StartEventsDetectionJobResponse": { + "base": null, + "refs": { + } + }, "StartKeyPhrasesDetectionJobRequest": { "base": null, "refs": { @@ -1626,6 +1708,16 @@ "refs": { } }, + "StopEventsDetectionJobRequest": { + "base": null, + "refs": { + } + }, + "StopEventsDetectionJobResponse": { + "base": null, + "refs": { + } + }, "StopKeyPhrasesDetectionJobRequest": { "base": null, "refs": { @@ -1706,6 +1798,8 @@ "ListEntitiesDetectionJobsResponse$NextToken": "Identifies the next page of results to return.
", "ListEntityRecognizersRequest$NextToken": "Identifies the next page of results to return.
", "ListEntityRecognizersResponse$NextToken": "Identifies the next page of results to return.
", + "ListEventsDetectionJobsRequest$NextToken": "Identifies the next page of results to return.
", + "ListEventsDetectionJobsResponse$NextToken": "Identifies the next page of results to return.
", "ListKeyPhrasesDetectionJobsRequest$NextToken": "Identifies the next page of results to return.
", "ListKeyPhrasesDetectionJobsResponse$NextToken": "Identifies the next page of results to return.
", "ListPiiEntitiesDetectionJobsRequest$NextToken": "Identifies the next page of results to return.
", @@ -1796,6 +1890,13 @@ "Tag$Value": "The second part of a key-value pair that forms a tag associated with a given resource. For instance, if you want to show which resources are used by which departments, you might use “Department” as the initial (key) portion of the pair, with a value of “sales” to indicate the sales department.
" } }, + "TargetEventTypes": { + "base": null, + "refs": { + "EventsDetectionJobProperties$TargetEventTypes": "The types of events that are detected by the job.
", + "StartEventsDetectionJobRequest$TargetEventTypes": "The types of events to detect in the input documents.
" + } + }, "TextSizeLimitExceededException": { "base": "The size of the input text exceeds the limit. Use a smaller document.
", "refs": { @@ -1832,6 +1933,10 @@ "EntityRecognizerProperties$EndTime": "The time that the recognizer creation completed.
", "EntityRecognizerProperties$TrainingStartTime": "The time that training of the entity recognizer started.
", "EntityRecognizerProperties$TrainingEndTime": "The time that training of the entity recognizer was completed.
", + "EventsDetectionJobFilter$SubmitTimeBefore": "Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted before the specified time. Jobs are returned in ascending order, oldest to newest.
", + "EventsDetectionJobFilter$SubmitTimeAfter": "Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted after the specified time. Jobs are returned in descending order, newest to oldest.
", + "EventsDetectionJobProperties$SubmitTime": "The time that the events detection job was submitted for processing.
", + "EventsDetectionJobProperties$EndTime": "The time that the events detection job completed.
", "KeyPhrasesDetectionJobFilter$SubmitTimeBefore": "Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted before the specified time. Jobs are returned in ascending order, oldest to newest.
", "KeyPhrasesDetectionJobFilter$SubmitTimeAfter": "Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted after the specified time. Jobs are returned in descending order, newest to oldest.
", "KeyPhrasesDetectionJobProperties$SubmitTime": "The time that the key phrases detection job was submitted for processing.
", @@ -1910,7 +2015,7 @@ } }, "VpcConfig": { - "base": "Configuration parameters for an optional private Virtual Private Cloud (VPC) containing the resources you are using for the job. For For more information, see Amazon VPC.
", + "base": "Configuration parameters for an optional private Virtual Private Cloud (VPC) containing the resources you are using for the job. For more information, see Amazon VPC.
", "refs": { "CreateDocumentClassifierRequest$VpcConfig": "Configuration parameters for an optional private Virtual Private Cloud (VPC) containing the resources you are using for your custom classifier. For more information, see Amazon VPC.
", "CreateEntityRecognizerRequest$VpcConfig": "Configuration parameters for an optional private Virtual Private Cloud (VPC) containing the resources you are using for your custom entity recognizer. For more information, see Amazon VPC.
", diff --git a/models/apis/comprehend/2017-11-27/paginators-1.json b/models/apis/comprehend/2017-11-27/paginators-1.json index 0a98c5b4e9f..3c7889ffc02 100644 --- a/models/apis/comprehend/2017-11-27/paginators-1.json +++ b/models/apis/comprehend/2017-11-27/paginators-1.json @@ -25,6 +25,11 @@ "output_token": "NextToken", "limit_key": "MaxResults" }, + "ListEventsDetectionJobs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "ListKeyPhrasesDetectionJobs": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/models/apis/elasticbeanstalk/2010-12-01/api-2.json b/models/apis/elasticbeanstalk/2010-12-01/api-2.json index a082cb3d0af..d535ed6bf6d 100644 --- a/models/apis/elasticbeanstalk/2010-12-01/api-2.json +++ b/models/apis/elasticbeanstalk/2010-12-01/api-2.json @@ -1296,7 +1296,7 @@ "EnvironmentId":{"shape":"EnvironmentId"}, "EnvironmentName":{"shape":"EnvironmentName"}, "NextToken":{"shape":"String"}, - "MaxItems":{"shape":"Integer"} + "MaxItems":{"shape":"ManagedActionHistoryMaxItems"} } }, "DescribeEnvironmentManagedActionHistoryResult":{ @@ -1857,6 +1857,11 @@ "max":100, "min":1 }, + "ManagedActionHistoryMaxItems":{ + "type":"integer", + "max":100, + "min":1 + }, "ManagedActionInvalidStateException":{ "type":"structure", "members":{ diff --git a/models/apis/elasticbeanstalk/2010-12-01/docs-2.json b/models/apis/elasticbeanstalk/2010-12-01/docs-2.json index 0943f1d56ac..d4cd9ae25a3 100644 --- a/models/apis/elasticbeanstalk/2010-12-01/docs-2.json +++ b/models/apis/elasticbeanstalk/2010-12-01/docs-2.json @@ -1061,7 +1061,6 @@ "Integer": { "base": null, "refs": { - "DescribeEnvironmentManagedActionHistoryRequest$MaxItems": "The maximum number of items to return for a single request.
", "Listener$Port": "The port that is used by the Listener.
" } }, @@ -1202,6 +1201,12 @@ "DescribeEnvironmentManagedActionHistoryResult$ManagedActionHistoryItems": "A list of completed and failed managed actions.
" } }, + "ManagedActionHistoryMaxItems": { + "base": null, + "refs": { + "DescribeEnvironmentManagedActionHistoryRequest$MaxItems": "The maximum number of items to return for a single request.
" + } + }, "ManagedActionInvalidStateException": { "base": "Cannot modify the managed action in its current state.
", "refs": { diff --git a/models/apis/fsx/2018-03-01/api-2.json b/models/apis/fsx/2018-03-01/api-2.json index d099a292171..4807c2c0825 100644 --- a/models/apis/fsx/2018-03-01/api-2.json +++ b/models/apis/fsx/2018-03-01/api-2.json @@ -496,7 +496,8 @@ "CREATING", "TRANSFERRING", "DELETED", - "FAILED" + "FAILED", + "PENDING" ] }, "BackupNotFound":{ diff --git a/models/apis/fsx/2018-03-01/docs-2.json b/models/apis/fsx/2018-03-01/docs-2.json index c587190316d..73f90b2c770 100644 --- a/models/apis/fsx/2018-03-01/docs-2.json +++ b/models/apis/fsx/2018-03-01/docs-2.json @@ -4,8 +4,8 @@ "operations": { "AssociateFileSystemAliases": "Use this action to associate one or more Domain Name Server (DNS) aliases with an existing Amazon FSx for Windows File Server file system. A file systen can have a maximum of 50 DNS aliases associated with it at any one time. If you try to associate a DNS alias that is already associated with the file system, FSx takes no action on that alias in the request. For more information, see Working with DNS Aliases and Walkthrough 5: Using DNS aliases to access your file system, including additional steps you must take to be able to access your file system using a DNS alias.
The system response shows the DNS aliases that Amazon FSx is attempting to associate with the file system. Use the API operation to monitor the status of the aliases Amazon FSx is associating with the file system.
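A hedged sketch of the alias workflow this paragraph describes, using the generated FSx client; the file system ID and alias are placeholders, and the status poll is left as a comment:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/fsx"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	client := fsx.New(sess)

	// File system ID and alias name are placeholders.
	out, err := client.AssociateFileSystemAliases(&fsx.AssociateFileSystemAliasesInput{
		FileSystemId: aws.String("fs-0123456789abcdef0"),
		Aliases:      aws.StringSlice([]string{"accounting.corp.example.com"}),
	})
	if err != nil {
		log.Fatal(err)
	}
	// Each alias starts out CREATING; poll DescribeFileSystemAliases until
	// its lifecycle reaches AVAILABLE.
	for _, a := range out.Aliases {
		fmt.Println(aws.StringValue(a.Name), aws.StringValue(a.Lifecycle))
	}
}
```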
", "CancelDataRepositoryTask": "Cancels an existing Amazon FSx for Lustre data repository task if that task is in either the PENDING
or EXECUTING
state. When you cancel a task, Amazon FSx does the following.
Any files that FSx has already exported are not reverted.
FSx continues to export any files that are \"in-flight\" when the cancel operation is received.
FSx does not export any files that have not yet been exported.
", - "CreateBackup": "Creates a backup of an existing Amazon FSx file system. Creating regular backups for your file system is a best practice, enabling you to restore a file system from a backup if an issue arises with the original file system.
For Amazon FSx for Lustre file systems, you can create a backup only for file systems with the following configuration:
a Persistent deployment type
is not linked to a data respository.
For more information about backing up Amazon FSx for Lustre file systems, see Working with FSx for Lustre backups.
For more information about backing up Amazon FSx for Lustre file systems, see Working with FSx for Windows backups.
If a backup with the specified client request token exists, and the parameters match, this operation returns the description of the existing backup. If a backup specified client request token exists, and the parameters don't match, this operation returns IncompatibleParameterError
. If a backup with the specified client request token doesn't exist, CreateBackup
does the following:
Creates a new Amazon FSx backup with an assigned ID, and an initial lifecycle state of CREATING
.
Returns the description of the backup.
By using the idempotent operation, you can retry a CreateBackup
operation without the risk of creating an extra backup. This approach can be useful when an initial call fails in a way that makes it unclear whether a backup was created. If you use the same client request token and the initial call created a backup, the operation returns a successful result because all the parameters are the same.
The CreateBackup
operation returns while the backup's lifecycle state is still CREATING
. You can check the backup creation status by calling the DescribeBackups operation, which returns the backup state along with other information.
", - "CreateDataRepositoryTask": "Creates an Amazon FSx for Lustre data repository task. You use data repository tasks to perform bulk operations between your Amazon FSx file system and its linked data repository. An example of a data repository task is exporting any data and metadata changes, including POSIX metadata, to files, directories, and symbolic links (symlinks) from your FSx file system to its linked data repository. A CreateDataRepositoryTask
operation will fail if a data repository is not linked to the FSx file system. To learn more about data repository tasks, see Using Data Repository Tasks. To learn more about linking a data repository to your file system, see Setting the Export Prefix.
", + "CreateBackup": "Creates a backup of an existing Amazon FSx file system. Creating regular backups for your file system is a best practice, enabling you to restore a file system from a backup if an issue arises with the original file system.
For Amazon FSx for Lustre file systems, you can create a backup only for file systems with the following configuration:
a Persistent deployment type
is not linked to a data repository.
For more information about backing up Amazon FSx for Lustre file systems, see Working with FSx for Lustre backups.
For more information about backing up Amazon FSx for Windows file systems, see Working with FSx for Windows backups.
If a backup with the specified client request token exists, and the parameters match, this operation returns the description of the existing backup. If a backup specified client request token exists, and the parameters don't match, this operation returns IncompatibleParameterError
. If a backup with the specified client request token doesn't exist, CreateBackup
does the following:
Creates a new Amazon FSx backup with an assigned ID, and an initial lifecycle state of CREATING
.
Returns the description of the backup.
By using the idempotent operation, you can retry a CreateBackup
operation without the risk of creating an extra backup. This approach can be useful when an initial call fails in a way that makes it unclear whether a backup was created. If you use the same client request token and the initial call created a backup, the operation returns a successful result because all the parameters are the same.
The CreateBackup
operation returns while the backup's lifecycle state is still CREATING
. You can check the backup creation status by calling the DescribeBackups operation, which returns the backup state along with other information.
", + "CreateDataRepositoryTask": "Creates an Amazon FSx for Lustre data repository task. You use data repository tasks to perform bulk operations between your Amazon FSx file system and its linked data repository. An example of a data repository task is exporting any data and metadata changes, including POSIX metadata, to files, directories, and symbolic links (symlinks) from your FSx file system to its linked data repository. A CreateDataRepositoryTask
operation will fail if a data repository is not linked to the FSx file system. To learn more about data repository tasks, see Data Repository Tasks. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket.
", "CreateFileSystem": "Creates a new, empty Amazon FSx file system.
If a file system with the specified client request token exists and the parameters match, CreateFileSystem
returns the description of the existing file system. If a file system specified client request token exists and the parameters don't match, this call returns IncompatibleParameterError
. If a file system with the specified client request token doesn't exist, CreateFileSystem
does the following:
Creates a new, empty Amazon FSx file system with an assigned ID, and an initial lifecycle state of CREATING
.
Returns the description of the file system.
This operation requires a client request token in the request that Amazon FSx uses to ensure idempotent creation. This means that calling the operation multiple times with the same client request token has no effect. By using the idempotent operation, you can retry a CreateFileSystem
operation without the risk of creating an extra file system. This approach can be useful when an initial call fails in a way that makes it unclear whether a file system was created. Examples are if a transport level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a file system, the client receives success as long as the parameters are the same.
The CreateFileSystem
call returns while the file system's lifecycle state is still CREATING
. You can check the file-system creation status by calling the DescribeFileSystems operation, which returns the file system state along with other information.
", "CreateFileSystemFromBackup": "Creates a new Amazon FSx file system from an existing Amazon FSx backup.
If a file system with the specified client request token exists and the parameters match, this operation returns the description of the file system. If a client request token specified by the file system exists and the parameters don't match, this call returns IncompatibleParameterError
. If a file system with the specified client request token doesn't exist, this operation does the following:
Creates a new Amazon FSx file system from backup with an assigned ID, and an initial lifecycle state of CREATING
.
Returns the description of the file system.
Parameters like Active Directory, default share name, automatic backup, and backup settings default to the parameters of the file system that was backed up, unless overridden. You can explicitly supply other settings.
By using the idempotent operation, you can retry a CreateFileSystemFromBackup
call without the risk of creating an extra file system. This approach can be useful when an initial call fails in a way that makes it unclear whether a file system was created. Examples are if a transport level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a file system, the client receives success as long as the parameters are the same.
The CreateFileSystemFromBackup
call returns while the file system's lifecycle state is still CREATING
. You can check the file-system creation status by calling the DescribeFileSystems operation, which returns the file system state along with other information.
", "DeleteBackup": "Deletes an Amazon FSx backup, deleting its contents. After deletion, the backup no longer exists, and its data is gone.
The DeleteBackup
call returns instantly. The backup will not show up in later DescribeBackups
calls.
The data in a deleted backup is also deleted and can't be recovered by any means.
", "ListTagsForResource": "Lists tags for Amazon FSx file systems and backups in the case of Amazon FSx for Windows File Server.
When retrieving all tags, you can optionally specify the MaxResults
parameter to limit the number of tags in a response. If more tags remain, Amazon FSx returns a NextToken
value in the response. In this case, send a later request with the NextToken
request parameter set to the value of NextToken
from the last response.
This action is used in an iterative process to retrieve a list of your tags. ListTagsForResource
is called first without a NextToken
value. Then the action continues to be called with the NextToken
parameter set to the value of the last NextToken
value until a response has no NextToken
.
When using this action, keep the following in mind:
The implementation might return fewer than MaxResults
file system descriptions while still including a NextToken
value.
The order of tags returned in the response of one ListTagsForResource
call and the order of tags returned across the responses of a multi-call iteration is unspecified.
", "TagResource": "Tags an Amazon FSx resource.
", "UntagResource": "This action removes a tag from an Amazon FSx resource.
", - "UpdateFileSystem": "Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request.
For Amazon FSx for Windows File Server file systems, you can update the following properties:
AutomaticBackupRetentionDays
DailyAutomaticBackupStartTime
SelfManagedActiveDirectoryConfiguration
StorageCapacity
ThroughputCapacity
WeeklyMaintenanceStartTime
For Amazon FSx for Lustre file systems, you can update the following properties:
AutoImportPolicy
AutomaticBackupRetentionDays
DailyAutomaticBackupStartTime
WeeklyMaintenanceStartTime
", + "UpdateFileSystem": "Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request.
For Amazon FSx for Windows File Server file systems, you can update the following properties:
AutomaticBackupRetentionDays
DailyAutomaticBackupStartTime
SelfManagedActiveDirectoryConfiguration
StorageCapacity
ThroughputCapacity
WeeklyMaintenanceStartTime
For Amazon FSx for Lustre file systems, you can update the following properties:
AutoImportPolicy
AutomaticBackupRetentionDays
DailyAutomaticBackupStartTime
StorageCapacity
WeeklyMaintenanceStartTime
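StorageCapacity is the property newly documented as updatable for Lustre file systems in the list above. A minimal sketch, with a placeholder file system ID and target size; the new capacity must follow the service's sizing rules for the file system's deployment type:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/fsx"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	client := fsx.New(sess)

	// File system ID and capacity are placeholders.
	out, err := client.UpdateFileSystem(&fsx.UpdateFileSystemInput{
		FileSystemId:    aws.String("fs-0123456789abcdef0"),
		StorageCapacity: aws.Int64(2400), // GiB
	})
	if err != nil {
		log.Fatal(err)
	}
	// The resize then proceeds as a FILE_SYSTEM_UPDATE administrative action
	// followed by STORAGE_OPTIMIZATION, as described below.
	fmt.Println(aws.StringValue(out.FileSystem.FileSystemId))
}
```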
", "shapes": { "AdministrativeAction": { - "base": "Describes a specific Amazon FSx Administrative Action for the current Windows file system.
", + "base": "Describes a specific Amazon FSx administrative action for the current Windows or Lustre file system.
", "refs": { "AdministrativeActions$member": null } @@ -65,7 +65,7 @@ } }, "AdministrativeActionType": { - "base": "Describes the type of administrative action, as follows:
FILE_SYSTEM_UPDATE
- A file system update administrative action initiated by the user from the Amazon FSx console, API (UpdateFileSystem), or CLI (update-file-system).
STORAGE_OPTIMIZATION
- Once the FILE_SYSTEM_UPDATE
task to increase a file system's storage capacity completes successfully, a STORAGE_OPTIMIZATION
task starts. Storage optimization is the process of migrating the file system data to the new, larger disks. You can track the storage migration progress using the ProgressPercent
property. When STORAGE_OPTIMIZATION
completes successfully, the parent FILE_SYSTEM_UPDATE
action status changes to COMPLETED
. For more information, see Managing Storage Capacity.
FILE_SYSTEM_ALIAS_ASSOCIATION
- A file system update to associate a new DNS alias with the file system. For more information, see .
FILE_SYSTEM_ALIAS_DISASSOCIATION
- A file system update to disassociate a DNS alias from the file system. For more information, see .
", + "base": "Describes the type of administrative action, as follows:
FILE_SYSTEM_UPDATE
- A file system update administrative action initiated by the user from the Amazon FSx console, API (UpdateFileSystem), or CLI (update-file-system).
STORAGE_OPTIMIZATION
- Once the FILE_SYSTEM_UPDATE
task to increase a file system's storage capacity completes successfully, a STORAGE_OPTIMIZATION
task starts.
For Windows, storage optimization is the process of migrating the file system data to the new, larger disks.
For Lustre, storage optimization consists of rebalancing the data across the existing and newly added file servers.
You can track the storage optimization progress using the ProgressPercent
property. When STORAGE_OPTIMIZATION
completes successfully, the parent FILE_SYSTEM_UPDATE
action status changes to COMPLETED
. For more information, see Managing storage capacity in the Amazon FSx for Windows File Server User Guide and Managing storage and throughput capacity in the Amazon FSx for Lustre User Guide.
FILE_SYSTEM_ALIAS_ASSOCIATION
- A file system update to associate a new DNS alias with the file system. For more information, see .
FILE_SYSTEM_ALIAS_DISASSOCIATION
- A file system update to disassociate a DNS alias from the file system. For more information, see .
", "Aliases": { - "base": "An array of one or more DNS aliases that are currently associated with the Amazon FSx file system. Aliases allow you to use existing DNS names to access the data in your Amazon FSx file system. You can associate up to 50 aliases with a file system at any time. You can associate additional DNS aliases after you create the file system using the AssociateFileSystemAliases operation. You can remove DNS aliases from the file system after it is created using the DisassociateFileSystemAliases operation. You only need to specify the alias name in the request payload. For more information, see DNS aliases.
", + "base": "An array of one or more DNS aliases that are currently associated with the Amazon FSx file system. Aliases allow you to use existing DNS names to access the data in your Amazon FSx file system. You can associate up to 50 aliases with a file system at any time. You can associate additional DNS aliases after you create the file system using the AssociateFileSystemAliases operation. You can remove DNS aliases from the file system after it is created using the DisassociateFileSystemAliases operation. You only need to specify the alias name in the request payload. For more information, see DNS aliases.
", "refs": { "AssociateFileSystemAliasesResponse$Aliases": "An array of the DNS aliases that Amazon FSx is associating with the file system.
", "DescribeFileSystemAliasesResponse$Aliases": "An array of one or more DNS aliases currently associated with the specified file system.
", @@ -108,7 +108,7 @@ "base": null, "refs": { "AssociateFileSystemAliasesRequest$Aliases": "An array of one or more DNS alias names to associate with the file system. The alias name has to comply with the following formatting requirements:
Formatted as a fully-qualified domain name (FQDN), hostname.domain
, for example, accounting.corp.example.com
.
Can contain alphanumeric characters and the hyphen (-).
Cannot start or end with a hyphen.
Can start with a numeric.
For DNS alias names, Amazon FSx stores alphabetic characters as lowercase letters (a-z), regardless of how you specify them: as uppercase letters, lowercase letters, or the corresponding letters in escape codes.
", - "CreateFileSystemWindowsConfiguration$Aliases": "An array of one or more DNS alias names that you want to associate with the Amazon FSx file system. Aliases allow you to use existing DNS names to access the data in your Amazon FSx file system. You can associate up to 50 aliases with a file system at any time. You can associate additional DNS aliases after you create the file system using the AssociateFileSystemAliases operation. You can remove DNS aliases from the file system after it is created using the DisassociateFileSystemAliases operation. You only need to specify the alias name in the request payload.
For more information, see Working with DNS Aliases and Walkthrough 5: Using DNS aliases to access your file system, including additional steps you must take to be able to access your file system using a DNS alias.
An alias name has to meet the following requirements:
Formatted as a fully-qualified domain name (FQDN), hostname.domain
, for example, accounting.example.com
.
Can contain alphanumeric characters and the hyphen (-).
Cannot start or end with a hyphen.
Can start with a numeric.
For DNS alias names, Amazon FSx stores alphabetic characters as lowercase letters (a-z), regardless of how you specify them: as uppercase letters, lowercase letters, or the corresponding letters in escape codes.
", + "CreateFileSystemWindowsConfiguration$Aliases": "An array of one or more DNS alias names that you want to associate with the Amazon FSx file system. Aliases allow you to use existing DNS names to access the data in your Amazon FSx file system. You can associate up to 50 aliases with a file system at any time. You can associate additional DNS aliases after you create the file system using the AssociateFileSystemAliases operation. You can remove DNS aliases from the file system after it is created using the DisassociateFileSystemAliases operation. You only need to specify the alias name in the request payload.
For more information, see Working with DNS Aliases and Walkthrough 5: Using DNS aliases to access your file system, including additional steps you must take to be able to access your file system using a DNS alias.
An alias name has to meet the following requirements:
Formatted as a fully-qualified domain name (FQDN), hostname.domain
, for example, accounting.example.com
.
Can contain alphanumeric characters and the hyphen (-).
Cannot start or end with a hyphen.
Can start with a numeric.
For DNS alias names, Amazon FSx stores alphabetic characters as lowercase letters (a-z), regardless of how you specify them: as uppercase letters, lowercase letters, or the corresponding letters in escape codes.
", "DisassociateFileSystemAliasesRequest$Aliases": "An array of one or more DNS alias names to disassociate, or remove, from the file system.
" } }, @@ -152,7 +152,7 @@ } }, "Backup": { - "base": "A backup of an Amazon FSx for file system.
", + "base": "A backup of an Amazon FSx file system. For more information see:
", "refs": { "Backups$member": null, "CreateBackupResponse$Backup": "A description of the backup.
" @@ -188,9 +188,9 @@ } }, "BackupLifecycle": { - "base": "The lifecycle status of the backup.
AVAILABLE
- The backup is fully available.
CREATING
- FSx is creating the new user-initiated backup
TRANSFERRING
- For user-initiated backups on Lustre file systems only; FSx is backing up the file system.
DELETED
- The backup was deleted and is no longer available.
FAILED
- Amazon FSx could not complete the backup.
The lifecycle status of the backup.
AVAILABLE
- The backup is fully available.
PENDING
- For user-initiated backups on Lustre file systems only; Amazon FSx has not started creating the backup.
CREATING
- Amazon FSx is creating the new user-initiated backup
TRANSFERRING
- For user-initiated backups on Lustre file systems only; Amazon FSx is backing up the file system.
DELETED
- Amazon FSx deleted the backup and it is no longer available.
FAILED
- Amazon FSx could not complete the backup.
The lifecycle status of the backup.
AVAILABLE
- The backup is fully available.
CREATING
- FSx is creating the backup.
TRANSFERRING
- For Lustre file systems only; FSx is transferring the backup to S3.
DELETED
- The backup was deleted and is no longer available.
FAILED
- Amazon FSx could not complete the backup.
The lifecycle status of the backup.
AVAILABLE
- The backup is fully available.
PENDING
- For user-initiated backups on Lustre file systems only; Amazon FSx has not started creating the backup.
CREATING
- Amazon FSx is creating the backup.
TRANSFERRING
- For user-initiated backups on Lustre file systems only; Amazon FSx is transferring the backup to S3.
DELETED
- Amazon FSx deleted the backup and it is no longer available.
FAILED
- Amazon FSx could not complete the backup.
The lifecycle of the backup. Should be DELETED
.
Describes the status of the administrative action, as follows:
FAILED
- Amazon FSx failed to process the administrative action successfully.
IN_PROGRESS
- Amazon FSx is processing the administrative action.
PENDING
- Amazon FSx is waiting to process the administrative action.
COMPLETED
- Amazon FSx has finished processing the administrative task.
UPDATED_OPTIMIZING
- For a storage capacity increase update, Amazon FSx has updated the file system with the new storage capacity, and is now performing the storage optimization process. For more information, see Managing Storage Capacity.
Describes the status of the administrative action, as follows:
FAILED
- Amazon FSx failed to process the administrative action successfully.
IN_PROGRESS
- Amazon FSx is processing the administrative action.
PENDING
- Amazon FSx is waiting to process the administrative action.
COMPLETED
- Amazon FSx has finished processing the administrative task.
UPDATED_OPTIMIZING
- For a storage capacity increase update, Amazon FSx has updated the file system with the new storage capacity, and is now performing the storage optimization process. For more information, see Managing storage capacity in the Amazon FSx for Windows File Server User Guide and Managing storage and throughput capacity in the Amazon FSx for Lustre User Guide.
The storage capacity for your Amazon FSx file system, in gibibytes.
", "refs": { "CreateFileSystemRequest$StorageCapacity": "Sets the storage capacity of the file system that you're creating.
For Lustre file systems:
For SCRATCH_2
and PERSISTENT_1 SSD
deployment types, valid values are 1200 GiB, 2400 GiB, and increments of 2400 GiB.
For PERSISTENT HDD
file systems, valid values are increments of 6000 GiB for 12 MB/s/TiB file systems and increments of 1800 GiB for 40 MB/s/TiB file systems.
For SCRATCH_1
deployment type, valid values are 1200 GiB, 2400 GiB, and increments of 3600 GiB.
For Windows file systems:
If StorageType=SSD
, valid values are 32 GiB - 65,536 GiB (64 TiB).
If StorageType=HDD
, valid values are 2000 GiB - 65,536 GiB (64 TiB).
The storage capacity of the file system in gigabytes (GB).
", - "UpdateFileSystemRequest$StorageCapacity": "Use this parameter to increase the storage capacity of an Amazon FSx for Windows File Server file system. Specifies the storage capacity target value, GiB, for the file system you're updating. The storage capacity target value must be at least 10 percent (%) greater than the current storage capacity value. In order to increase storage capacity, the file system needs to have at least 16 MB/s of throughput capacity. You cannot make a storage capacity increase request if there is an existing storage capacity increase request in progress. For more information, see Managing Storage Capacity.
" + "FileSystem$StorageCapacity": "The storage capacity of the file system in gibibytes (GiB).
", + "UpdateFileSystemRequest$StorageCapacity": "Use this parameter to increase the storage capacity of an Amazon FSx file system. Specifies the storage capacity target value, GiB, to increase the storage capacity for the file system that you're updating. You cannot make a storage capacity increase request if there is an existing storage capacity increase request in progress.
For Windows file systems, the storage capacity target value must be at least 10 percent (%) greater than the current storage capacity value. In order to increase storage capacity, the file system must have at least 16 MB/s of throughput capacity.
For Lustre file systems, the storage capacity target value can be the following:
For SCRATCH_2
and PERSISTENT_1 SSD
deployment types, valid values are in multiples of 2400 GiB. The value must be greater than the current storage capacity.
For PERSISTENT HDD
file systems, valid values are multiples of 6000 GiB for 12 MB/s/TiB file systems and multiples of 1800 GiB for 40 MB/s/TiB file systems. The values must be greater than the current storage capacity.
For SCRATCH_1
file systems, you cannot increase the storage capacity.
For more information, see Managing storage capacity in the Amazon FSx for Windows File Server User Guide and Managing storage and throughput capacity in the Amazon FSx for Lustre User Guide.
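A minimal sketch of the storage capacity increase described above, assuming aws-sdk-go; the file system ID and the 4800 GiB target are illustrative placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/fsx"
)

func main() {
	svc := fsx.New(session.Must(session.NewSession()))

	// Placeholder values: for a Lustre SCRATCH_2 or PERSISTENT_1 SSD file
	// system the target must be a multiple of 2400 GiB above the current
	// capacity; for Windows it must exceed the current value by at least 10%.
	out, err := svc.UpdateFileSystem(&fsx.UpdateFileSystemInput{
		FileSystemId:    aws.String("fs-0123456789abcdef0"),
		StorageCapacity: aws.Int64(4800),
	})
	if err != nil {
		fmt.Println("UpdateFileSystem failed:", err)
		return
	}
	// Progress then surfaces through the FILE_SYSTEM_UPDATE and
	// STORAGE_OPTIMIZATION administrative actions described earlier.
	fmt.Println(aws.Int64Value(out.FileSystem.StorageCapacity))
}
```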
" } }, "StorageType": { diff --git a/models/apis/gamelift/2015-10-01/api-2.json b/models/apis/gamelift/2015-10-01/api-2.json index 66a58148121..9beba43ca96 100644 --- a/models/apis/gamelift/2015-10-01/api-2.json +++ b/models/apis/gamelift/2015-10-01/api-2.json @@ -1727,7 +1727,6 @@ "type":"structure", "required":[ "Name", - "GameSessionQueueArns", "RequestTimeoutSeconds", "AcceptanceRequired", "RuleSetName" @@ -1746,6 +1745,7 @@ "GameProperties":{"shape":"GamePropertyList"}, "GameSessionData":{"shape":"GameSessionData"}, "BackfillMode":{"shape":"BackfillMode"}, + "FlexMatchMode":{"shape":"FlexMatchMode"}, "Tags":{"shape":"TagList"} } }, @@ -2432,6 +2432,14 @@ "c5.12xlarge", "c5.18xlarge", "c5.24xlarge", + "c5a.large", + "c5a.xlarge", + "c5a.2xlarge", + "c5a.4xlarge", + "c5a.8xlarge", + "c5a.12xlarge", + "c5a.16xlarge", + "c5a.24xlarge", "r3.large", "r3.xlarge", "r3.2xlarge", @@ -2451,6 +2459,14 @@ "r5.12xlarge", "r5.16xlarge", "r5.24xlarge", + "r5a.large", + "r5a.xlarge", + "r5a.2xlarge", + "r5a.4xlarge", + "r5a.8xlarge", + "r5a.12xlarge", + "r5a.16xlarge", + "r5a.24xlarge", "m3.medium", "m3.large", "m3.xlarge", @@ -2467,7 +2483,15 @@ "m5.8xlarge", "m5.12xlarge", "m5.16xlarge", - "m5.24xlarge" + "m5.24xlarge", + "m5a.large", + "m5a.xlarge", + "m5a.2xlarge", + "m5a.4xlarge", + "m5a.8xlarge", + "m5a.12xlarge", + "m5a.16xlarge", + "m5a.24xlarge" ] }, "Event":{ @@ -2641,6 +2665,13 @@ "type":"list", "member":{"shape":"FleetUtilization"} }, + "FlexMatchMode":{ + "type":"string", + "enum":[ + "STANDALONE", + "WITH_QUEUE" + ] + }, "Float":{"type":"float"}, "FreeText":{"type":"string"}, "GameProperty":{ @@ -3390,7 +3421,8 @@ "CreationTime":{"shape":"Timestamp"}, "GameProperties":{"shape":"GamePropertyList"}, "GameSessionData":{"shape":"GameSessionData"}, - "BackfillMode":{"shape":"BackfillMode"} + "BackfillMode":{"shape":"BackfillMode"}, + "FlexMatchMode":{"shape":"FlexMatchMode"} } }, "MatchmakingConfigurationArn":{ @@ -4006,7 +4038,6 @@ "type":"structure", "required":[ "ConfigurationName", - "GameSessionArn", "Players" ], "members":{ @@ -4382,7 +4413,8 @@ "CustomEventData":{"shape":"CustomEventData"}, "GameProperties":{"shape":"GamePropertyList"}, "GameSessionData":{"shape":"GameSessionData"}, - "BackfillMode":{"shape":"BackfillMode"} + "BackfillMode":{"shape":"BackfillMode"}, + "FlexMatchMode":{"shape":"FlexMatchMode"} } }, "UpdateMatchmakingConfigurationOutput":{ diff --git a/models/apis/gamelift/2015-10-01/docs-2.json b/models/apis/gamelift/2015-10-01/docs-2.json index f157d569a37..33736564132 100644 --- a/models/apis/gamelift/2015-10-01/docs-2.json +++ b/models/apis/gamelift/2015-10-01/docs-2.json @@ -2,7 +2,7 @@ "version": "2.0", "service": "GameLift provides solutions for hosting session-based multiplayer game servers in the cloud, including tools for deploying, operating, and scaling game servers. Built on AWS global computing infrastructure, GameLift helps you deliver high-performance, high-reliability, low-cost game servers while dynamically scaling your resource usage to meet player demand.
About GameLift solutions
Get more information on these GameLift solutions in the Amazon GameLift Developer Guide.
Managed GameLift -- GameLift offers a fully managed service to set up and maintain computing machines for hosting, manage game session and player session life cycle, and handle security, storage, and performance tracking. You can use automatic scaling tools to balance hosting costs against meeting player demand, configure your game session management to minimize player latency, or add FlexMatch for matchmaking.
Managed GameLift with Realtime Servers – With GameLift Realtime Servers, you can quickly configure and set up game servers for your game. Realtime Servers provides a game server framework with core Amazon GameLift infrastructure already built in.
GameLift FleetIQ – Use GameLift FleetIQ as a standalone feature while managing your own EC2 instances and Auto Scaling groups for game hosting. GameLift FleetIQ provides optimizations that make low-cost Spot Instances viable for game hosting.
About this API Reference
This reference guide describes the low-level service API for Amazon GameLift. You can find links to language-specific SDK guides and the AWS CLI reference with each operation and data type topic. Useful links:
", "operations": { - "AcceptMatch": "Registers a player's acceptance or rejection of a proposed FlexMatch match. A matchmaking configuration may require player acceptance; if so, then matches built with that configuration cannot be completed unless all players accept the proposed match within a specified time limit.
When FlexMatch builds a match, all the matchmaking tickets involved in the proposed match are placed into status REQUIRES_ACCEPTANCE
. This is a trigger for your game to get acceptance from all players in the ticket. Acceptances are only valid for tickets when they are in this status; all other acceptances result in an error.
To register acceptance, specify the ticket ID, a response, and one or more players. Once all players have registered acceptance, the matchmaking tickets advance to status PLACING
, where a new game session is created for the match.
If any player rejects the match, or if acceptances are not received before a specified timeout, the proposed match is dropped. The matchmaking tickets are then handled in one of two ways: For tickets where one or more players rejected the match, the ticket status is returned to SEARCHING
to find a new match. For tickets where one or more players failed to respond, the ticket status is set to CANCELLED
, and processing is terminated. A new matchmaking request for these players can be submitted as needed.
Learn more
Add FlexMatch to a Game Client
Related operations
", + "AcceptMatch": "Registers a player's acceptance or rejection of a proposed FlexMatch match. A matchmaking configuration may require player acceptance; if so, then matches built with that configuration cannot be completed unless all players accept the proposed match within a specified time limit.
When FlexMatch builds a match, all the matchmaking tickets involved in the proposed match are placed into status REQUIRES_ACCEPTANCE
. This is a trigger for your game to get acceptance from all players in the ticket. Acceptances are only valid for tickets when they are in this status; all other acceptances result in an error.
To register acceptance, specify the ticket ID, a response, and one or more players. Once all players have registered acceptance, the matchmaking tickets advance to status PLACING
, where a new game session is created for the match.
If any player rejects the match, or if acceptances are not received before a specified timeout, the proposed match is dropped. The matchmaking tickets are then handled in one of two ways: For tickets where one or more players rejected the match, the ticket status is returned to SEARCHING
to find a new match. For tickets where one or more players failed to respond, the ticket status is set to CANCELLED
, and processing is terminated. A new matchmaking request for these players can be submitted as needed.
Learn more
Add FlexMatch to a Game Client
Related operations
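A hedged aws-sdk-go sketch of registering acceptance for a ticket in REQUIRES_ACCEPTANCE status; the ticket and player IDs are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/gamelift"
)

func main() {
	svc := gamelift.New(session.Must(session.NewSession()))

	// Acceptances are only valid while the ticket is in REQUIRES_ACCEPTANCE.
	_, err := svc.AcceptMatch(&gamelift.AcceptMatchInput{
		TicketId:       aws.String("matchmaking-ticket-1"), // placeholder
		PlayerIds:      []*string{aws.String("player-1")},
		AcceptanceType: aws.String(gamelift.AcceptanceTypeAccept),
	})
	if err != nil {
		fmt.Println("AcceptMatch failed:", err)
	}
}
```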
", "ClaimGameServer": "This operation is used with the Amazon GameLift FleetIQ solution and game server groups.
Locates an available game server and temporarily reserves it to host gameplay and players. This operation is called from a game client or client service (such as a matchmaker) to request hosting resources for a new game session. In response, GameLift FleetIQ locates an available game server, places it in CLAIMED
status for 60 seconds, and returns connection information that players can use to connect to the game server.
To claim a game server, identify a game server group. You can also specify a game server ID, although this approach bypasses GameLift FleetIQ placement optimization. Optionally, include game data to pass to the game server at the start of a game session, such as a game map or player information.
When a game server is successfully claimed, connection information is returned. A claimed game server's utilization status remains AVAILABLE
while the claim status is set to CLAIMED
for up to 60 seconds. This time period gives the game server time to update its status to UTILIZED
(using UpdateGameServer) once players join. If the game server's status is not updated within 60 seconds, the game server reverts to unclaimed status and is available to be claimed by another request. The claim time period is a fixed value and is not configurable.
If you try to claim a specific game server, this request will fail in the following cases:
If the game server utilization status is UTILIZED
.
If the game server claim status is CLAIMED
.
When claiming a specific game server, this request will succeed even if the game server is running on an instance in DRAINING
status. To avoid this, first check the instance status by calling DescribeGameServerInstances.
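As a hedged sketch of the claim flow just described (the game server group name is a placeholder):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/gamelift"
)

func main() {
	svc := gamelift.New(session.Must(session.NewSession()))

	// Omitting GameServerId lets GameLift FleetIQ pick an optimal server.
	out, err := svc.ClaimGameServer(&gamelift.ClaimGameServerInput{
		GameServerGroupName: aws.String("my-game-server-group"), // placeholder
	})
	if err != nil {
		fmt.Println("ClaimGameServer failed:", err)
		return
	}
	// The claim holds for roughly 60 seconds; players connect with this info.
	fmt.Println(aws.StringValue(out.GameServer.ConnectionInfo))
}
```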
Learn more
Related operations
Creates an alias for a fleet. In most situations, you can use an alias ID in place of a fleet ID. An alias provides a level of abstraction for a fleet that is useful when redirecting player traffic from one fleet to another, such as when updating your game build.
Amazon GameLift supports two types of routing strategies for aliases: simple and terminal. A simple alias points to an active fleet. A terminal alias is used to display messaging or link to a URL instead of routing players to an active fleet. For example, you might use a terminal alias when a game version is no longer supported and you want to direct players to an upgrade site.
To create a fleet alias, specify an alias name, routing strategy, and optional description. Each simple alias can point to only one fleet, but a fleet can have multiple aliases. If successful, a new alias record is returned, including an alias ID and an ARN. You can reassign an alias to another fleet by calling UpdateAlias
.
Creates a new Amazon GameLift build resource for your game server binary files. Game server binaries must be combined into a zip file for use with Amazon GameLift.
When setting up a new game build for GameLift, we recommend using the AWS CLI command upload-build . This helper command combines two tasks: (1) it uploads your build files from a file directory to a GameLift Amazon S3 location, and (2) it creates a new build resource.
The CreateBuild
operation can be used in the following scenarios:
To create a new game build with build files that are in an S3 location under an AWS account that you control. To use this option, you must first give Amazon GameLift access to the S3 bucket. With permissions in place, call CreateBuild
and specify a build name, operating system, and the S3 storage location of your game build.
To directly upload your build files to a GameLift S3 location. To use this option, first call CreateBuild
and specify a build name and operating system. This operation creates a new build resource and also returns an S3 location with temporary access credentials. Use the credentials to manually upload your build files to the specified S3 location. For more information, see Uploading Objects in the Amazon S3 Developer Guide. Build files can be uploaded to the GameLift S3 location once only; they can't be updated afterward.
If successful, this operation creates a new build resource with a unique build ID and places it in INITIALIZED
status. A build must be in READY
status before you can create fleets with it.
Learn more
Create a Build with Files in Amazon S3
Related operations
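A minimal sketch of the first CreateBuild scenario above (build files already in your own S3 bucket); the bucket, key, and role ARN are placeholder assumptions:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/gamelift"
)

func main() {
	svc := gamelift.New(session.Must(session.NewSession()))

	out, err := svc.CreateBuild(&gamelift.CreateBuildInput{
		Name:            aws.String("my-game-build"),
		OperatingSystem: aws.String(gamelift.OperatingSystemAmazonLinux2),
		StorageLocation: &gamelift.S3Location{
			Bucket:  aws.String("my-build-bucket"), // placeholder bucket
			Key:     aws.String("build.zip"),
			RoleArn: aws.String("arn:aws:iam::111122223333:role/gamelift-s3-access"), // placeholder role
		},
	})
	if err != nil {
		fmt.Println("CreateBuild failed:", err)
		return
	}
	// The build starts in INITIALIZED and must reach READY before fleet creation.
	fmt.Println(aws.StringValue(out.Build.BuildId))
}
```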
", @@ -10,8 +10,8 @@ "CreateGameServerGroup": "This operation is used with the Amazon GameLift FleetIQ solution and game server groups.
Creates a GameLift FleetIQ game server group for managing game hosting on a collection of Amazon EC2 instances for game hosting. This operation creates the game server group, creates an Auto Scaling group in your AWS account, and establishes a link between the two groups. You can view the status of your game server groups in the GameLift console. Game server group metrics and events are emitted to Amazon CloudWatch.
Before creating a new game server group, you must have the following:
An Amazon EC2 launch template that specifies how to launch Amazon EC2 instances with your game server build. For more information, see Launching an Instance from a Launch Template in the Amazon EC2 User Guide.
An IAM role that extends limited access to your AWS account to allow GameLift FleetIQ to create and interact with the Auto Scaling group. For more information, see Create IAM roles for cross-service interaction in the GameLift FleetIQ Developer Guide.
To create a new game server group, specify a unique group name, IAM role and Amazon EC2 launch template, and provide a list of instance types that can be used in the group. You must also set initial maximum and minimum limits on the group's instance count. You can optionally set an Auto Scaling policy with target tracking based on a GameLift FleetIQ metric.
Once the game server group and corresponding Auto Scaling group are created, you have full access to change the Auto Scaling group's configuration as needed. Several properties that are set when creating a game server group, including maximum/minimum size and auto-scaling policy settings, must be updated directly in the Auto Scaling group. Keep in mind that some Auto Scaling group properties are periodically updated by GameLift FleetIQ as part of its balancing activities to optimize for availability and cost.
Learn more
Related operations
Creates a multiplayer game session for players. This operation creates a game session record and assigns an available server process in the specified fleet to host the game session. A fleet must have an ACTIVE
status before a game session can be created in it.
To create a game session, specify either fleet ID or alias ID and indicate a maximum number of players to allow in the game session. You can also provide a name and game-specific properties for this game session. If successful, a GameSession object is returned containing the game session properties and other settings you specified.
Idempotency tokens. You can add a token that uniquely identifies game session requests. This is useful for ensuring that game session requests are idempotent. Multiple requests with the same idempotency token are processed only once; subsequent requests return the original result. All response values are the same with the exception of game session status, which may change.
Resource creation limits. If you are creating a game session on a fleet with a resource creation limit policy in force, then you must specify a creator ID. Without this ID, Amazon GameLift has no way to evaluate the policy for this new game session request.
Player acceptance policy. By default, newly created game sessions are open to new players. You can restrict new player access by using UpdateGameSession to change the game session's player session creation policy.
Game session logs. Logs are retained for all active game sessions for 14 days. To access the logs, call GetGameSessionLogUrl to download the log files.
Available in Amazon GameLift Local.
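A hedged sketch of the game session request described above, including the idempotency token it mentions; the fleet ID and token are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/gamelift"
)

func main() {
	svc := gamelift.New(session.Must(session.NewSession()))

	out, err := svc.CreateGameSession(&gamelift.CreateGameSessionInput{
		FleetId:                   aws.String("fleet-2222bbbb-33cc-44dd-55ee-6666ffff77aa"), // placeholder
		MaximumPlayerSessionCount: aws.Int64(8),
		Name:                      aws.String("test-session"),
		// An idempotency token makes retries of this request safe.
		IdempotencyToken: aws.String("9a3f1e62-placeholder-token"),
	})
	if err != nil {
		fmt.Println("CreateGameSession failed:", err)
		return
	}
	fmt.Println(aws.StringValue(out.GameSession.GameSessionId))
}
```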
Game session placements
Establishes a new queue for processing requests to place new game sessions. A queue identifies where new game sessions can be hosted -- by specifying a list of destinations (fleets or aliases) -- and how long requests can wait in the queue before timing out. You can set up a queue to try to place game sessions on fleets in multiple Regions. To add placement requests to a queue, call StartGameSessionPlacement and reference the queue name.
Destination order. When processing a request for a game session, Amazon GameLift tries each destination in order until it finds one with available resources to host the new game session. A queue's default order is determined by how destinations are listed. The default order is overridden when a game session placement request provides player latency information. Player latency information enables Amazon GameLift to prioritize destinations where players report the lowest average latency, as a result placing the new game session where the majority of players will have the best possible gameplay experience.
Player latency policies. For placement requests containing player latency information, use player latency policies to protect individual players from very high latencies. With a latency cap, even when a destination can deliver a low latency for most players, the game is not placed where any individual player is reporting latency higher than a policy's maximum. A queue can have multiple latency policies, which are enforced consecutively starting with the policy with the lowest latency cap. Use multiple policies to gradually relax latency controls; for example, you might set a policy with a low latency cap for the first 60 seconds, a second policy with a higher cap for the next 60 seconds, etc.
To create a new queue, provide a name, timeout value, a list of destinations and, if desired, a set of latency policies. If successful, a new queue object is returned.
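A minimal sketch of such a queue, assuming aws-sdk-go; the queue name, fleet ARN, and latency cap are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/gamelift"
)

func main() {
	svc := gamelift.New(session.Must(session.NewSession()))

	out, err := svc.CreateGameSessionQueue(&gamelift.CreateGameSessionQueueInput{
		Name:             aws.String("my-queue"),
		TimeoutInSeconds: aws.Int64(600),
		Destinations: []*gamelift.GameSessionQueueDestination{
			// Placeholder fleet ARN; destinations are tried in listed order.
			{DestinationArn: aws.String("arn:aws:gamelift:us-west-2:111122223333:fleet/fleet-2222bbbb-33cc-44dd-55ee-6666ffff77aa")},
		},
		PlayerLatencyPolicies: []*gamelift.PlayerLatencyPolicy{
			// Cap individual player latency at 100 ms for the first 60 seconds.
			{MaximumIndividualPlayerLatencyMilliseconds: aws.Int64(100), PolicyDurationSeconds: aws.Int64(60)},
		},
	})
	if err != nil {
		fmt.Println("CreateGameSessionQueue failed:", err)
		return
	}
	fmt.Println(aws.StringValue(out.GameSessionQueue.GameSessionQueueArn))
}
```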
Learn more
Related operations
", - "CreateMatchmakingConfiguration": "Defines a new matchmaking configuration for use with FlexMatch. A matchmaking configuration sets out guidelines for matching players and getting the matches into games. You can set up multiple matchmaking configurations to handle the scenarios needed for your game. Each matchmaking ticket (StartMatchmaking or StartMatchBackfill) specifies a configuration for the match and provides player attributes to support the configuration being used.
To create a matchmaking configuration, at a minimum you must specify the following: configuration name; a rule set that governs how to evaluate players and find acceptable matches; a game session queue to use when placing a new game session for the match; and the maximum time allowed for a matchmaking attempt.
To track the progress of matchmaking tickets, set up an Amazon Simple Notification Service (SNS) to receive notifications, and provide the topic ARN in the matchmaking configuration. An alternative method, continuously polling ticket status with DescribeMatchmaking, should only be used for games in development with low matchmaking usage.
Learn more
Set Up FlexMatch Event Notification
Related operations
Creates a new rule set for FlexMatch matchmaking. A rule set describes the type of match to create, such as the number and size of teams. It also sets the parameters for acceptable player matches, such as minimum skill level or character type. A rule set is used by a MatchmakingConfiguration.
To create a matchmaking rule set, provide unique rule set name and the rule set body in JSON format. Rule sets must be defined in the same Region as the matchmaking configuration they are used with.
Since matchmaking rule sets cannot be edited, it is a good idea to check the rule set syntax using ValidateMatchmakingRuleSet before creating a new rule set.
Learn more
Related operations
Defines a new matchmaking configuration for use with FlexMatch. Whether you are using FlexMatch with GameLift hosting or as a standalone matchmaking service, the matchmaking configuration sets out rules for matching players and forming teams. If you're also using GameLift hosting, it defines how to start game sessions for each match. Your matchmaking system can use multiple configurations to handle different game scenarios. All matchmaking requests (StartMatchmaking or StartMatchBackfill) identify the matchmaking configuration to use and provide player attributes consistent with that configuration.
To create a matchmaking configuration, you must provide the following: configuration name and FlexMatch mode (with or without GameLift hosting); a rule set that specifies how to evaluate players and find acceptable matches; whether player acceptance is required; and the maximum time allowed for a matchmaking attempt. When using FlexMatch with GameLift hosting, you also need to identify the game session queue to use when starting a game session for the match.
In addition, you must set up an Amazon Simple Notification Service (SNS) to receive matchmaking notifications, and provide the topic ARN in the matchmaking configuration. An alternative method, continuously polling ticket status with DescribeMatchmaking, is only suitable for games in development with low matchmaking usage.
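A hedged sketch of a standalone configuration using the new FlexMatchMode field introduced in this release; the names and SNS topic ARN are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/gamelift"
)

func main() {
	svc := gamelift.New(session.Must(session.NewSession()))

	out, err := svc.CreateMatchmakingConfiguration(&gamelift.CreateMatchmakingConfigurationInput{
		Name:                  aws.String("standalone-matchmaker"), // placeholder
		RuleSetName:           aws.String("my-rule-set"),           // placeholder
		RequestTimeoutSeconds: aws.Int64(120),
		AcceptanceRequired:    aws.Bool(false),
		NotificationTarget:    aws.String("arn:aws:sns:us-west-2:111122223333:flexmatch-events"), // placeholder
		// STANDALONE skips game session placement, so no GameSessionQueueArns
		// are needed; per the api-2.json change above, that field is no
		// longer required.
		FlexMatchMode: aws.String(gamelift.FlexMatchModeStandalone),
	})
	if err != nil {
		fmt.Println("CreateMatchmakingConfiguration failed:", err)
		return
	}
	fmt.Println(aws.StringValue(out.Configuration.ConfigurationArn))
}
```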
Learn more
Set Up FlexMatch Event Notification
Related operations
Creates a new rule set for FlexMatch matchmaking. A rule set describes the type of match to create, such as the number and size of teams. It also sets the parameters for acceptable player matches, such as minimum skill level or character type. A rule set is used by a MatchmakingConfiguration.
To create a matchmaking rule set, provide unique rule set name and the rule set body in JSON format. Rule sets must be defined in the same Region as the matchmaking configuration they are used with.
Since matchmaking rule sets cannot be edited, it is a good idea to check the rule set syntax using ValidateMatchmakingRuleSet before creating a new rule set.
Learn more
Related operations
Reserves an open player slot in an active game session. Before a player can be added, a game session must have an ACTIVE
status, have a creation policy of ALLOW_ALL
, and have an open player slot. To add a group of players to a game session, use CreatePlayerSessions. When the player connects to the game server and references a player session ID, the game server contacts the Amazon GameLift service to validate the player reservation and accept the player.
To create a player session, specify a game session ID, player ID, and optionally a string of player data. If successful, a slot is reserved in the game session for the player and a new PlayerSession object is returned. Player sessions cannot be updated.
Available in Amazon GameLift Local.
Game session placements
Reserves open slots in a game session for a group of players. Before players can be added, a game session must have an ACTIVE
status, have a creation policy of ALLOW_ALL
, and have an open player slot. To add a single player to a game session, use CreatePlayerSession. When a player connects to the game server and references a player session ID, the game server contacts the Amazon GameLift service to validate the player reservation and accept the player.
To create player sessions, specify a game session ID, a list of player IDs, and optionally a set of player data strings. If successful, a slot is reserved in the game session for each player and a set of new PlayerSession objects is returned. Player sessions cannot be updated.
Available in Amazon GameLift Local.
Game session placements
Creates a new script record for your Realtime Servers script. Realtime scripts are JavaScript that provide configuration settings and optional custom game logic for your game. The script is deployed when you create a Realtime Servers fleet to host your game sessions. Script logic is executed during an active game session.
To create a new script record, specify a script name and provide the script file(s). The script files and all dependencies must be zipped into a single file. You can pull the zip file from either of these locations:
A locally available directory. Use the ZipFile parameter for this option.
An Amazon Simple Storage Service (Amazon S3) bucket under your AWS account. Use the StorageLocation parameter for this option. You'll need to have an AWS Identity and Access Management (IAM) role that allows the Amazon GameLift service to access your S3 bucket.
If the call is successful, a new script record is created with a unique script ID. If the script file is provided as a local file, the file is uploaded to an Amazon GameLift-owned S3 bucket and the script record's storage location reflects this location. If the script file is provided as an S3 bucket, Amazon GameLift accesses the file at this storage location as needed for deployment.
Learn more
Amazon GameLift Realtime Servers
Set Up a Role for Amazon GameLift Access
Related operations
", @@ -23,7 +23,7 @@ "DeleteGameServerGroup": "This operation is used with the Amazon GameLift FleetIQ solution and game server groups.
Terminates a game server group and permanently deletes the game server group record. You have several options for how these resources are impacted when deleting the game server group. Depending on the type of delete operation selected, this operation might affect these resources:
The game server group
The corresponding Auto Scaling group
All game servers that are currently running in the group
To delete a game server group, identify the game server group to delete and specify the type of delete operation to initiate. Game server groups can only be deleted if they are in ACTIVE
or ERROR
status.
If the delete request is successful, a series of operations are kicked off. The game server group status is changed to DELETE_SCHEDULED
, which prevents new game servers from being registered and stops automatic scaling activity. Once all game servers in the game server group are deregistered, GameLift FleetIQ can begin deleting resources. If any of the delete operations fail, the game server group is placed in ERROR
status.
GameLift FleetIQ emits delete events to Amazon CloudWatch.
Learn more
Related operations
Deletes a game session queue. Once a queue is successfully deleted, unfulfilled StartGameSessionPlacement requests that reference the queue will fail. To delete a queue, specify the queue name.
Learn more
Related operations
", "DeleteMatchmakingConfiguration": "Permanently removes a FlexMatch matchmaking configuration. To delete, specify the configuration name. A matchmaking configuration cannot be deleted if it is being used in any active matchmaking tickets.
Related operations
Deletes an existing matchmaking rule set. To delete the rule set, provide the rule set name. Rule sets cannot be deleted if they are currently being used by a matchmaking configuration.
Learn more
Related operations
Deletes an existing matchmaking rule set. To delete the rule set, provide the rule set name. Rule sets cannot be deleted if they are currently being used by a matchmaking configuration.
Learn more
Related operations
Deletes a fleet scaling policy. Once deleted, the policy is no longer in force and GameLift removes all record of it. To delete a scaling policy, specify both the scaling policy name and the fleet ID it is associated with.
To temporarily suspend scaling policies, call StopFleetActions. This operation suspends all policies for the fleet.
Manage scaling policies:
PutScalingPolicy (auto-scaling)
DescribeScalingPolicies (auto-scaling)
DeleteScalingPolicy (auto-scaling)
Manage fleet actions:
Deletes a Realtime script. This operation permanently deletes the script record. If script files were uploaded, they are also deleted (files stored in an S3 bucket are not deleted).
To delete a script, specify the script ID. Before deleting a script, be sure to terminate all fleets that are deployed with the script being deleted. Fleet instances periodically check for script updates, and if the script record no longer exists, the instance will go into an error state and be unable to host game sessions.
Learn more
Amazon GameLift Realtime Servers
Related operations
", "DeleteVpcPeeringAuthorization": "Cancels a pending VPC peering authorization for the specified VPC. If you need to delete an existing VPC peering connection, call DeleteVpcPeeringConnection.
Retrieves the properties for one or more game session queues. When requesting multiple queues, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a GameSessionQueue object is returned for each requested queue. When specifying a list of queues, objects are returned only for queues that currently exist in the Region.
Learn more
Related operations
", "DescribeGameSessions": "Retrieves a set of one or more game sessions. Request a specific game session or request all game sessions on a fleet. Alternatively, use SearchGameSessions to request a set of active game sessions that are filtered by certain criteria. To retrieve protection policy settings for game sessions, use DescribeGameSessionDetails.
To get game sessions, specify one of the following: game session ID, fleet ID, or alias ID. You can filter this request by game session status. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a GameSession object is returned for each game session matching the request.
Available in Amazon GameLift Local.
Game session placements
Retrieves information about a fleet's instances, including instance IDs. Use this operation to get details on all instances in the fleet or get details on one specific instance.
To get a specific instance, specify fleet ID and instance ID. To get all instances in a fleet, specify a fleet ID only. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, an Instance object is returned for each result.
Learn more
Remotely Access Fleet Instances
Related operations
", - "DescribeMatchmaking": "Retrieves one or more matchmaking tickets. Use this operation to retrieve ticket information, including--after a successful match is made--connection information for the resulting new game session.
To request matchmaking tickets, provide a list of up to 10 ticket IDs. If the request is successful, a ticket object is returned for each requested ID that currently exists.
This operation is not designed to be continually called to track matchmaking ticket status. This practice can cause you to exceed your API limit, which results in errors. Instead, as a best practice, set up an Amazon Simple Notification Service (SNS) to receive notifications, and provide the topic ARN in the matchmaking configuration. Continuously polling ticket status with DescribeMatchmaking should only be used for games in development with low matchmaking usage.
Learn more
Add FlexMatch to a Game Client
Set Up FlexMatch Event Notification
Related operations
", - "DescribeMatchmakingConfigurations": "Retrieves the details of FlexMatch matchmaking configurations.
This operation offers the following options: (1) retrieve all matchmaking configurations, (2) retrieve configurations for a specified list, or (3) retrieve all configurations that use a specified rule set name. When requesting multiple items, use the pagination parameters to retrieve results as a set of sequential pages.
If successful, a configuration is returned for each requested name. When specifying a list of names, only configurations that currently exist are returned.
Learn more
Setting Up FlexMatch Matchmakers
Related operations
Retrieves the details for FlexMatch matchmaking rule sets. You can request all existing rule sets for the Region, or provide a list of one or more rule set names. When requesting multiple items, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a rule set is returned for each requested name.
Learn more
Related operations
Retrieves one or more matchmaking tickets. Use this operation to retrieve ticket information, including--after a successful match is made--connection information for the resulting new game session.
To request matchmaking tickets, provide a list of up to 10 ticket IDs. If the request is successful, a ticket object is returned for each requested ID that currently exists.
This operation is not designed to be continually called to track matchmaking ticket status. This practice can cause you to exceed your API limit, which results in errors. Instead, as a best practice, set up an Amazon Simple Notification Service (SNS) to receive notifications, and provide the topic ARN in the matchmaking configuration. Continuously poling ticket status with DescribeMatchmaking should only be used for games in development with low matchmaking usage.
Learn more
Add FlexMatch to a Game Client
Set Up FlexMatch Event Notification
Related operations
", + "DescribeMatchmakingConfigurations": "Retrieves the details of FlexMatch matchmaking configurations.
This operation offers the following options: (1) retrieve all matchmaking configurations, (2) retrieve configurations for a specified list, or (3) retrieve all configurations that use a specified rule set name. When requesting multiple items, use the pagination parameters to retrieve results as a set of sequential pages.
If successful, a configuration is returned for each requested name. When specifying a list of names, only configurations that currently exist are returned.
Learn more
Setting Up FlexMatch Matchmakers
Related operations
Retrieves the details for FlexMatch matchmaking rule sets. You can request all existing rule sets for the Region, or provide a list of one or more rule set names. When requesting multiple items, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a rule set is returned for each requested name.
Learn more
Related operations
Retrieves properties for one or more player sessions. This operation can be used in several ways: (1) provide a PlayerSessionId
to request properties for a specific player session; (2) provide a GameSessionId
to request properties for all player sessions in the specified game session; (3) provide a PlayerId
to request properties for all player sessions of a specified player.
To get game session record(s), specify only one of the following: a player session ID, a game session ID, or a player ID. You can filter this request by player session status. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a PlayerSession object is returned for each session matching the request.
Available in Amazon GameLift Local.
Game session placements
Retrieves a fleet's runtime configuration settings. The runtime configuration tells Amazon GameLift which server processes to run (and how) on each instance in the fleet.
To get a runtime configuration, specify the fleet's unique identifier. If successful, a RuntimeConfiguration object is returned for the requested fleet. If the requested fleet has been deleted, the result set is empty.
Learn more
Running Multiple Processes on a Fleet
Related operations
Describe fleets:
Retrieves all scaling policies applied to a fleet.
To get a fleet's scaling policies, specify the fleet ID. You can filter this request by policy status, such as to retrieve only active scaling policies. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, set of ScalingPolicy objects is returned for the fleet.
A fleet may have all of its scaling policies suspended (StopFleetActions). This operation does not affect the status of the scaling policies, which remains ACTIVE. To see whether a fleet's scaling policies are in force or suspended, call DescribeFleetAttributes and check the stopped actions.
Manage scaling policies:
PutScalingPolicy (auto-scaling)
DescribeScalingPolicies (auto-scaling)
DeleteScalingPolicy (auto-scaling)
Manage fleet actions:
Retrieves all active game sessions that match a set of search criteria and sorts them in a specified order. You can search or sort by the following game session attributes:
gameSessionId -- A unique identifier for the game session. You can use either a GameSessionId
or GameSessionArn
value.
gameSessionName -- Name assigned to a game session. This value is set when requesting a new game session with CreateGameSession or updating with UpdateGameSession. Game session names do not need to be unique to a game session.
gameSessionProperties -- Custom data defined in a game session's GameProperty
parameter. GameProperty
values are stored as key:value pairs; the filter expression must indicate the key and a string to search the data values for. For example, to search for game sessions with custom data containing the key:value pair \"gameMode:brawl\", specify the following: gameSessionProperties.gameMode = \"brawl\"
. All custom data values are searched as strings.
maximumSessions -- Maximum number of player sessions allowed for a game session. This value is set when requesting a new game session with CreateGameSession or updating with UpdateGameSession.
creationTimeMillis -- Value indicating when a game session was created. It is expressed in Unix time as milliseconds.
playerSessionCount -- Number of players currently connected to a game session. This value changes rapidly as players join the session or drop out.
hasAvailablePlayerSessions -- Boolean value indicating whether a game session has reached its maximum number of players. It is highly recommended that all search requests include this filter attribute to optimize search performance and return only sessions that players can join.
Returned values for playerSessionCount
and hasAvailablePlayerSessions
change quickly as players join sessions and others drop out. Results should be considered a snapshot in time. Be sure to refresh search results often, and handle sessions that fill up before a player can join.
To search or sort, specify either a fleet ID or an alias ID, and provide a search filter expression, a sort expression, or both. If successful, a collection of GameSession objects matching the request is returned. Use the pagination parameters to retrieve results as a set of sequential pages.
You can search for game sessions one fleet at a time only. To find game sessions across multiple fleets, you must search each fleet separately and combine the results. This search feature finds only game sessions that are in ACTIVE
status. To locate games in statuses other than active, use DescribeGameSessionDetails.
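A hedged sketch of the filter and sort syntax described above; the fleet ID is a placeholder:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/gamelift"
)

func main() {
	svc := gamelift.New(session.Must(session.NewSession()))

	out, err := svc.SearchGameSessions(&gamelift.SearchGameSessionsInput{
		FleetId: aws.String("fleet-2222bbbb-33cc-44dd-55ee-6666ffff77aa"), // placeholder
		// Only joinable "brawl" sessions, oldest first, as sketched above.
		FilterExpression: aws.String(`gameSessionProperties.gameMode = "brawl" AND hasAvailablePlayerSessions = true`),
		SortExpression:   aws.String("creationTimeMillis ASC"),
		Limit:            aws.Int64(10),
	})
	if err != nil {
		fmt.Println("SearchGameSessions failed:", err)
		return
	}
	for _, gs := range out.GameSessions {
		fmt.Println(aws.StringValue(gs.GameSessionId))
	}
}
```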
Game session placements
Resumes activity on a fleet that was suspended with StopFleetActions. Currently, this operation is used to restart a fleet's auto-scaling activity.
To start fleet actions, specify the fleet ID and the type of actions to restart. When auto-scaling fleet actions are restarted, Amazon GameLift once again initiates scaling events as triggered by the fleet's scaling policies. If actions on the fleet were never stopped, this operation will have no effect. You can view a fleet's stopped actions using DescribeFleetAttributes.
Learn more
Related operations
Places a request for a new game session in a queue (see CreateGameSessionQueue). When processing a placement request, Amazon GameLift searches for available resources on the queue's destinations, scanning each until it finds resources or the placement request times out.
A game session placement request can also request player sessions. When a new game session is successfully created, Amazon GameLift creates a player session for each player included in the request.
When placing a game session, by default Amazon GameLift tries each fleet in the order they are listed in the queue configuration. Ideally, a queue's destinations are listed in preference order.
Alternatively, when requesting a game session with players, you can also provide latency data for each player in relevant Regions. Latency data indicates the performance lag a player experiences when connected to a fleet in the Region. Amazon GameLift uses latency data to reorder the list of destinations to place the game session in a Region with minimal lag. If latency data is provided for multiple players, Amazon GameLift calculates each Region's average lag for all players and reorders to get the best game play across all players.
To place a new game session request, specify the following:
The queue name and a set of game session properties and settings
A unique ID (such as a UUID) for the placement. You use this ID to track the status of the placement request
(Optional) A set of player data and a unique player ID for each player that you are joining to the new game session (player data is optional, but if you include it, you must also provide a unique ID for each player)
Latency data for all players (if you want to optimize game play for the players)
If successful, a new game session placement is created.
To track the status of a placement request, call DescribeGameSessionPlacement and check the request's status. If the status is FULFILLED
, a new game session has been created and a game session ARN and Region are referenced. If the placement request times out, you can resubmit the request or retry it with a different queue.
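A minimal placement sketch under the same assumptions (queue name and placement ID are placeholders):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/gamelift"
)

func main() {
	svc := gamelift.New(session.Must(session.NewSession()))

	// The placement ID (e.g. a UUID) is later used to poll the status
	// with DescribeGameSessionPlacement until it reaches FULFILLED.
	out, err := svc.StartGameSessionPlacement(&gamelift.StartGameSessionPlacementInput{
		PlacementId:               aws.String("11111111-2222-3333-4444-555555555555"), // placeholder
		GameSessionQueueName:      aws.String("my-queue"),
		MaximumPlayerSessionCount: aws.Int64(10),
	})
	if err != nil {
		fmt.Println("StartGameSessionPlacement failed:", err)
		return
	}
	fmt.Println(aws.StringValue(out.GameSessionPlacement.Status))
}
```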
Game session placements
Finds new players to fill open slots in an existing game session. This operation can be used to add players to matched games that start with fewer than the maximum number of players or to replace players when they drop out. By backfilling with the same matchmaker used to create the original match, you ensure that new players meet the match criteria and maintain a consistent experience throughout the game session. You can backfill a match anytime after a game session has been created.
To request a match backfill, specify a unique ticket ID, the existing game session's ARN, a matchmaking configuration, and a set of data that describes all current players in the game session. If successful, a match backfill ticket is created and returned with status set to QUEUED. The ticket is placed in the matchmaker's ticket pool and processed. Track the status of the ticket to respond as needed.
The process of finding backfill matches is essentially identical to the initial matchmaking process. The matchmaker searches the pool and groups tickets together to form potential matches, allowing only one backfill ticket per potential match. Once a match is formed, the matchmaker creates player sessions for the new players. All tickets in the match are updated with the game session's connection information, and the GameSession object is updated to include matchmaker data on the new players. For more detail on how match backfill requests are processed, see How Amazon GameLift FlexMatch Works.
Learn more
Backfill Existing Games with FlexMatch
Related operations
", - "StartMatchmaking": "Uses FlexMatch to create a game match for a group of players based on custom matchmaking rules, and starts a new game for the matched players. Each matchmaking request specifies the type of match to build (team configuration, rules for an acceptable match, etc.). The request also specifies the players to find a match for and where to host the new game session for optimal performance. A matchmaking request might start with a single player or a group of players who want to play together. FlexMatch finds additional players as needed to fill the match. Match type, rules, and the queue used to place a new game session are defined in a MatchmakingConfiguration
.
To start matchmaking, provide a unique ticket ID, specify a matchmaking configuration, and include the players to be matched. You must also include a set of player attributes relevant for the matchmaking configuration. If successful, a matchmaking ticket is returned with status set to QUEUED
.
Track the status of the ticket to respond as needed and acquire game session connection information for successfully completed matches. Ticket status updates are tracked using event notification through Amazon Simple Notification Service (SNS), which is defined in the matchmaking configuration.
Processing a matchmaking request -- FlexMatch handles a matchmaking request as follows:
Your client code submits a StartMatchmaking
request for one or more players and tracks the status of the request ticket.
FlexMatch uses this ticket and others in process to build an acceptable match. When a potential match is identified, all tickets in the proposed match are advanced to the next status.
If the match requires player acceptance (set in the matchmaking configuration), the tickets move into status REQUIRES_ACCEPTANCE
. This status triggers your client code to solicit acceptance from all players in every ticket involved in the match, and then call AcceptMatch for each player. If any player rejects or fails to accept the match before a specified timeout, the proposed match is dropped (see AcceptMatch
for more details).
Once a match is proposed and accepted, the matchmaking tickets move into status PLACING
. FlexMatch locates resources for a new game session using the game session queue (set in the matchmaking configuration) and creates the game session based on the match data.
When the match is successfully placed, the matchmaking tickets move into COMPLETED
status. Connection information (including game session endpoint and player session) is added to the matchmaking tickets. Matched players can use the connection information to join the game.
Learn more
Add FlexMatch to a Game Client
Set Up FlexMatch Event Notification
Related operations
", + "StartMatchBackfill": "Finds new players to fill open slots in an existing game session. This operation can be used to add players to matched games that start with fewer than the maximum number of players or to replace players when they drop out. By backfilling with the same matchmaker used to create the original match, you ensure that new players meet the match criteria and maintain a consistent experience throughout the game session. You can backfill a match anytime after a game session has been created.
To request a match backfill, specify a unique ticket ID, the existing game session's ARN, a matchmaking configuration, and a set of data that describes all current players in the game session. If successful, a match backfill ticket is created and returned with status set to QUEUED. The ticket is placed in the matchmaker's ticket pool and processed. Track the status of the ticket to respond as needed.
The process of finding backfill matches is essentially identical to the initial matchmaking process. The matchmaker searches the pool and groups tickets together to form potential matches, allowing only one backfill ticket per potential match. Once the a match is formed, the matchmaker creates player sessions for the new players. All tickets in the match are updated with the game session's connection information, and the GameSession object is updated to include matchmaker data on the new players. For more detail on how match backfill requests are processed, see How Amazon GameLift FlexMatch Works.
Learn more
Backfill Existing Games with FlexMatch
Related operations
", + "StartMatchmaking": "Uses FlexMatch to create a game match for a group of players based on custom matchmaking rules. If you're also using GameLift hosting, a new game session is started for the matched players. Each matchmaking request identifies one or more players to find a match for, and specifies the type of match to build, including the team configuration and the rules for an acceptable match. When a matchmaking request identifies a group of players who want to play together, FlexMatch finds additional players to fill the match. Match type, rules, and other features are defined in a MatchmakingConfiguration
.
To start matchmaking, provide a unique ticket ID, specify a matchmaking configuration, and include the players to be matched. For each player, you must also include the player attribute values that are required by the matchmaking configuration (in the rule set). If successful, a matchmaking ticket is returned with status set to QUEUED
.
Track the status of the ticket to respond as needed. If you're also using GameLift hosting, a successfully completed ticket contains game session connection information. Ticket status updates are tracked using event notification through Amazon Simple Notification Service (SNS), which is defined in the matchmaking configuration.
Learn more
Add FlexMatch to a Game Client
Set Up FlexMatch Event Notification
Related operations
",
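A minimal aws-sdk-go sketch of this request flow, polling DescribeMatchmaking for the ticket status; the configuration name, ticket ID, and player attributes are hypothetical, and production code would use the SNS notifications mentioned above:

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/gamelift"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	svc := gamelift.New(sess)

	ticketID := "ticket-0001" // hypothetical; must be unique per request
	_, err := svc.StartMatchmaking(&gamelift.StartMatchmakingInput{
		TicketId:          aws.String(ticketID),
		ConfigurationName: aws.String("my-matchmaking-config"), // hypothetical
		Players: []*gamelift.Player{{
			PlayerId: aws.String("player-1"), // hypothetical
			// Attribute names and values must match what the rule set expects.
			PlayerAttributes: map[string]*gamelift.AttributeValue{
				"skill": {N: aws.Float64(10)},
			},
		}},
	})
	if err != nil {
		fmt.Println("StartMatchmaking failed:", err)
		return
	}

	// Polling is shown for brevity only.
	for {
		out, err := svc.DescribeMatchmaking(&gamelift.DescribeMatchmakingInput{
			TicketIds: []*string{aws.String(ticketID)},
		})
		if err != nil || len(out.TicketList) == 0 {
			fmt.Println("DescribeMatchmaking failed:", err)
			return
		}
		status := aws.StringValue(out.TicketList[0].Status)
		fmt.Println("ticket status:", status)
		if status == "COMPLETED" || status == "FAILED" ||
			status == "CANCELLED" || status == "TIMED_OUT" {
			// On COMPLETED (with GameLift hosting), connection info is attached.
			fmt.Println(out.TicketList[0].GameSessionConnectionInfo)
			return
		}
		time.Sleep(10 * time.Second)
	}
}
```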
", "StopFleetActions": "Suspends activity on a fleet. Currently, this operation is used to stop a fleet's auto-scaling activity. It is used to temporarily stop triggering scaling events. The policies can be retained and auto-scaling activity can be restarted using StartFleetActions. You can view a fleet's stopped actions using DescribeFleetAttributes.
To stop fleet actions, specify the fleet ID and the type of actions to suspend. When auto-scaling fleet actions are stopped, Amazon GameLift no longer initiates scaling events except in response to manual changes using UpdateFleetCapacity.
Learn more
Related operations
Cancels a game session placement that is in PENDING status. To stop a placement, provide the placement ID values. If successful, the placement is moved to CANCELLED status.
Game session placements
Cancels a matchmaking ticket or match backfill ticket that is currently being processed. To stop the matchmaking operation, specify the ticket ID. If successful, work on the ticket is stopped, and the ticket status is changed to CANCELLED.
This call is also used to turn off automatic backfill for an individual game session. This is for game sessions that are created with a matchmaking configuration that has automatic backfill enabled. The ticket ID is included in the MatchmakerData of an updated game session object, which is provided to the game server.
If the operation is successful, the service sends back an empty JSON struct with the HTTP 200 response (not an empty HTTP body).
Learn more
Add FlexMatch to a Game Client
Related operations
", + "StopMatchmaking": "Cancels a matchmaking ticket or match backfill ticket that is currently being processed. To stop the matchmaking operation, specify the ticket ID. If successful, work on the ticket is stopped, and the ticket status is changed to CANCELLED
.
This call is also used to turn off automatic backfill for an individual game session. This is for game sessions that are created with a matchmaking configuration that has automatic backfill enabled. The ticket ID is included in the MatchmakerData
of an updated game session object, which is provided to the game server.
If the operation is successful, the service sends back an empty JSON struct with the HTTP 200 response (not an empty HTTP body).
Learn more
Add FlexMatch to a Game Client
Related operations
",
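A minimal aws-sdk-go sketch of cancelling a ticket as described above; the ticket ID is a hypothetical placeholder:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/gamelift"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	svc := gamelift.New(sess)

	// Cancel an in-process matchmaking or backfill ticket. To turn off
	// automatic backfill for a game session, pass the backfill ticket ID
	// found in the game session's MatchmakerData.
	_, err := svc.StopMatchmaking(&gamelift.StopMatchmakingInput{
		TicketId: aws.String("ticket-0001"), // hypothetical
	})
	if err != nil {
		fmt.Println("StopMatchmaking failed:", err)
		return
	}
	fmt.Println("ticket cancelled") // success returns an empty JSON struct
}
```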
", "SuspendGameServerGroup": "This operation is used with the Amazon GameLift FleetIQ solution and game server groups.
Temporarily stops activity on a game server group without terminating instances or the game server group. You can restart activity by calling ResumeGameServerGroup. You can suspend the following activity:
Instance type replacement - This activity evaluates the current game hosting viability of all Spot instance types that are defined for the game server group. It updates the Auto Scaling group to remove nonviable Spot Instance types, which have a higher chance of game server interruptions. It then balances capacity across the remaining viable Spot Instance types. When this activity is suspended, the Auto Scaling group continues with its current balance, regardless of viability. Instance protection, utilization metrics, and capacity scaling activities continue to be active.
To suspend activity, specify a game server group ARN and the type of activity to be suspended. If successful, a GameServerGroup object is returned showing that the activity is listed in SuspendedActions.
Learn more
Related operations
Assigns a tag to a GameLift resource. AWS resource tags provide an additional management tool set. You can use tags to organize resources, create IAM permissions policies to manage access to groups of resources, customize AWS cost breakdowns, etc. This operation handles the permissions necessary to manage tags for the following GameLift resource types:
Build
Script
Fleet
Alias
GameSessionQueue
MatchmakingConfiguration
MatchmakingRuleSet
To add a tag to a resource, specify the unique ARN value for the resource and provide a tag list containing one or more tags. The operation succeeds even if the list includes tags that are already assigned to the specified resource.
Learn more
Tagging AWS Resources in the AWS General Reference
Related operations
", "UntagResource": "Removes a tag that is assigned to a GameLift resource. Resource tags are used to organize AWS resources for a range of purposes. This operation handles the permissions necessary to manage tags for the following GameLift resource types:
Build
Script
Fleet
Alias
GameSessionQueue
MatchmakingConfiguration
MatchmakingRuleSet
To remove a tag from a resource, specify the unique ARN value for the resource and provide a string list containing one or more tags to be removed. This operation succeeds even if the list includes tags that are not currently assigned to the specified resource.
Learn more
Tagging AWS Resources in the AWS General Reference
Related operations
", @@ -88,10 +88,10 @@ "UpdateGameServerGroup": "This operation is used with the Amazon GameLift FleetIQ solution and game server groups.
Updates GameLift FleetIQ-specific properties for a game server group. Many Auto Scaling group properties are updated on the Auto Scaling group directly, including the launch template, Auto Scaling policies, and maximum/minimum/desired instance counts.
To update the game server group, specify the game server group ID and provide the updated values. Before applying the updates, the new values are validated to ensure that GameLift FleetIQ can continue to perform instance balancing activity. If successful, a GameServerGroup object is returned.
Learn more
Related operations
Updates game session properties. This includes the session name, maximum player count, protection policy, which controls whether or not an active game session can be terminated during a scale-down event, and the player session creation policy, which controls whether or not new players can join the session. To update a game session, specify the game session ID and the values you want to change. If successful, an updated GameSession object is returned.
Game session placements
Updates settings for a game session queue, which determines how new game session requests in the queue are processed. To update settings, specify the queue name to be updated and provide the new settings. When updating destinations, provide a complete list of destinations.
Learn more
Related operations
", - "UpdateMatchmakingConfiguration": "Updates settings for a FlexMatch matchmaking configuration. These changes affect all matches and game sessions that are created after the update. To update settings, specify the configuration name to be updated and provide the new settings.
Learn more
Related operations
Updates settings for a FlexMatch matchmaking configuration. These changes affect all matches and game sessions that are created after the update. To update settings, specify the configuration name to be updated and provide the new settings.
Learn more
Related operations
Updates the current runtime configuration for the specified fleet, which tells Amazon GameLift how to launch server processes on instances in the fleet. You can update a fleet's runtime configuration at any time after the fleet is created; it does not need to be in an ACTIVE status.
To update runtime configuration, specify the fleet ID and provide a RuntimeConfiguration object with an updated set of server process configurations.
Each instance in an Amazon GameLift fleet checks regularly for an updated runtime configuration and changes how it launches server processes to comply with the latest version. Existing server processes are not affected by the update; runtime configuration changes are applied gradually as existing processes shut down and new processes are launched during Amazon GameLift's normal process recycling activity.
Learn more
Related operations
Update fleets:
Updates Realtime script metadata and content.
To update script metadata, specify the script ID and provide updated name and/or version values.
To update script content, provide an updated zip file by pointing to either a local file or an Amazon S3 bucket location. You can use either method regardless of how the original script was uploaded. Use the Version parameter to track updates to the script.
If the call is successful, the updated metadata is stored in the script record and a revised script is uploaded to the Amazon GameLift service. Once the script is updated and acquired by a fleet instance, the new version is used for all new game sessions.
Learn more
Amazon GameLift Realtime Servers
Related operations
", - "ValidateMatchmakingRuleSet": "Validates the syntax of a matchmaking rule or rule set. This operation checks that the rule set is using syntactically correct JSON and that it conforms to allowed property expressions. To validate syntax, provide a rule set JSON string.
Learn more
Related operations
Validates the syntax of a matchmaking rule or rule set. This operation checks that the rule set is using syntactically correct JSON and that it conforms to allowed property expressions. To validate syntax, provide a rule set JSON string.
Learn more
Related operations
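A minimal aws-sdk-go sketch of validating a rule set as described above; the inline rule set is a deliberately simplified example (one 4-player team, no rules or expansions), not a production-ready one:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/gamelift"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	svc := gamelift.New(sess)

	// A deliberately minimal rule set: one 4-player team and no extra rules.
	ruleSet := `{
	    "name": "simple4",
	    "ruleLanguageVersion": "1.0",
	    "teams": [{"name": "players", "minPlayers": 4, "maxPlayers": 4}]
	}`

	out, err := svc.ValidateMatchmakingRuleSet(&gamelift.ValidateMatchmakingRuleSetInput{
		RuleSetBody: aws.String(ruleSet),
	})
	if err != nil {
		fmt.Println("ValidateMatchmakingRuleSet failed:", err)
		return
	}
	fmt.Println("rule set valid:", aws.BoolValue(out.Valid))
}
```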
The method used to backfill game sessions that are created with this matchmaking configuration. Specify MANUAL when your game manages backfill requests manually or does not use the match backfill feature. Specify AUTOMATIC to have GameLift create a StartMatchBackfill request whenever a game session has one or more open slots. Learn more about manual and automatic backfill in Backfill Existing Games with FlexMatch.
The method used to backfill game sessions created with this matchmaking configuration. MANUAL indicates that the game makes backfill requests or does not use the match backfill feature. AUTOMATIC indicates that GameLift creates StartMatchBackfill requests whenever a game session has one or more open slots. Learn more about manual and automatic backfill in Backfill Existing Games with FlexMatch.
", - "UpdateMatchmakingConfigurationInput$BackfillMode": "The method that is used to backfill game sessions created with this matchmaking configuration. Specify MANUAL when your game manages backfill requests manually or does not use the match backfill feature. Specify AUTOMATIC to have GameLift create a StartMatchBackfill request whenever a game session has one or more open slots. Learn more about manual and automatic backfill in Backfill Existing Games with FlexMatch.
" + "CreateMatchmakingConfigurationInput$BackfillMode": "The method used to backfill game sessions that are created with this matchmaking configuration. Specify MANUAL
when your game manages backfill requests manually or does not use the match backfill feature. Specify AUTOMATIC
to have GameLift create a StartMatchBackfill request whenever a game session has one or more open slots. Learn more about manual and automatic backfill in Backfill Existing Games with FlexMatch. Automatic backfill is not available when FlexMatchMode
is set to STANDALONE
.
The method used to backfill game sessions created with this matchmaking configuration. MANUAL indicates that the game makes backfill requests or does not use the match backfill feature. AUTOMATIC indicates that GameLift creates StartMatchBackfill requests whenever a game session has one or more open slots. Learn more about manual and automatic backfill in Backfill Existing Games with FlexMatch. Automatic backfill is not available when FlexMatchMode
is set to STANDALONE
.
The method that is used to backfill game sessions created with this matchmaking configuration. Specify MANUAL when your game manages backfill requests manually or does not use the match backfill feature. Specify AUTOMATIC to have GameLift create a StartMatchBackfill request whenever a game session has one or more open slots. Learn more about manual and automatic backfill in Backfill Existing Games with FlexMatch. Automatic backfill is not available when FlexMatchMode
is set to STANDALONE
.
A flag that determines whether a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE
.
A flag that indicates whether a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE.
", - "UpdateMatchmakingConfigurationInput$AcceptanceRequired": "A flag that indicates whether a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE.
", + "CreateMatchmakingConfigurationInput$AcceptanceRequired": "A flag that determines whether a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE
. With this option enabled, matchmaking tickets use the status REQUIRES_ACCEPTANCE
to indicate when a completed potential match is waiting for player acceptance.
A flag that indicates whether a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE. When this option is enabled, matchmaking tickets use the status REQUIRES_ACCEPTANCE
to indicate when a completed potential match is waiting for player acceptance.
A flag that indicates whether a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE. With this option enabled, matchmaking tickets use the status REQUIRES_ACCEPTANCE
to indicate when a completed potential match is waiting for player acceptance.
A response indicating whether the rule set is valid.
" } }, @@ -1003,6 +1003,14 @@ "DescribeFleetUtilizationOutput$FleetUtilization": "A collection of objects containing utilization information for each requested fleet ID.
" } }, + "FlexMatchMode": { + "base": null, + "refs": { + "CreateMatchmakingConfigurationInput$FlexMatchMode": "Indicates whether this matchmaking configuration is being used with GameLift hosting or as a standalone matchmaking solution.
STANDALONE - FlexMatch forms matches and returns match information, including players and team assignments, in a MatchmakingSucceeded event.
WITH_QUEUE - FlexMatch forms matches and uses the specified GameLift queue to start a game session for the match.
Indicates whether this matchmaking configuration is being used with GameLift hosting or as a standalone matchmaking solution.
STANDALONE - FlexMatch forms matches and returns match information, including players and team assignments, in a MatchmakingSucceeded event.
WITH_QUEUE - FlexMatch forms matches and uses the specified GameLift queue to start a game session for the match.
Indicates whether this matchmaking configuration is being used with GameLift hosting or as a standalone matchmaking solution.
STANDALONE - FlexMatch forms matches and returns match information, including players and team assignments, in a MatchmakingSucceeded event.
WITH_QUEUE - FlexMatch forms matches and uses the specified GameLift queue to start a game session for the match.
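A minimal aws-sdk-go sketch of creating a standalone matchmaking configuration using the FlexMatchMode values described above; the configuration and rule set names are hypothetical placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/gamelift"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	svc := gamelift.New(sess)

	out, err := svc.CreateMatchmakingConfiguration(&gamelift.CreateMatchmakingConfigurationInput{
		Name:                  aws.String("standalone-config"), // hypothetical
		RuleSetName:           aws.String("simple4"),           // hypothetical, must already exist
		RequestTimeoutSeconds: aws.Int64(120),
		AcceptanceRequired:    aws.Bool(false),
		// STANDALONE: matches are reported in a MatchmakingSucceeded event;
		// no queue is used, so GameSessionQueueArns is omitted.
		FlexMatchMode: aws.String(gamelift.FlexMatchModeStandalone),
	})
	if err != nil {
		fmt.Println("CreateMatchmakingConfiguration failed:", err)
		return
	}
	fmt.Println("created:", aws.StringValue(out.Configuration.ConfigurationArn))
}
```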
Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).
", - "CreateMatchmakingConfigurationInput$GameProperties": "A set of custom properties for a game session, formatted as key-value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.
", + "CreateMatchmakingConfigurationInput$GameProperties": "A set of custom properties for a game session, formatted as key-value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match. This parameter is not used if FlexMatchMode
is set to STANDALONE
.
Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). You can search for active game sessions based on this custom data with SearchGameSessions.
", "GameSessionPlacement$GameProperties": "Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).
", - "MatchmakingConfiguration$GameProperties": "A set of custom properties for a game session, formatted as key-value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.
", + "MatchmakingConfiguration$GameProperties": "A set of custom properties for a game session, formatted as key-value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match. This parameter is not used when FlexMatchMode
is set to STANDALONE
.
Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).
", - "UpdateMatchmakingConfigurationInput$GameProperties": "A set of custom properties for a game session, formatted as key-value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.
" + "UpdateMatchmakingConfigurationInput$GameProperties": "A set of custom properties for a game session, formatted as key-value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match. This parameter is not used if FlexMatchMode
is set to STANDALONE
.
The type of delete to perform. Options include the following:
SAFE_DELETE
– Terminates the game server group and EC2 Auto Scaling group only when it has no game servers that are in UTILIZED
status.
FORCE_DELETE
– Terminates the game server group, including all active game servers regardless of their utilization status, and the EC2 Auto Scaling group.
RETAIN
– Does a safe delete of the game server group but retains the EC2 Auto Scaling group as is.
The type of delete to perform. Options include the following:
SAFE_DELETE
– (default) Terminates the game server group and EC2 Auto Scaling group only when it has no game servers that are in UTILIZED
status.
FORCE_DELETE
– Terminates the game server group, including all active game servers regardless of their utilization status, and the EC2 Auto Scaling group.
RETAIN
– Does a safe delete of the game server group but retains the EC2 Auto Scaling group as is.
Connection information for the new game session that is created with matchmaking. (with StartMatchmaking). Once a match is set, the FlexMatch engine places the match and creates a new game session for it. This information, including the game session endpoint and player sessions for each player in the original matchmaking request, is added to the MatchmakingTicket, which can be retrieved by calling DescribeMatchmaking.
", + "base": "Connection information for a new game session that is created in response to a StartMatchmaking request. Once a match is made, the FlexMatch engine creates a new game session for it. This information, including the game session endpoint and player sessions for each player in the original matchmaking request, is added to the MatchmakingTicket, which can be retrieved by calling DescribeMatchmaking.
", "refs": { - "MatchmakingTicket$GameSessionConnectionInfo": "Identifier and connection information of the game session created for the match. This information is added to the ticket only after the matchmaking request has been successfully completed.
" + "MatchmakingTicket$GameSessionConnectionInfo": "Identifier and connection information of the game session created for the match. This information is added to the ticket only after the matchmaking request has been successfully completed. This parameter is not set when FlexMatch is being used without GameLift hosting.
" } }, "GameSessionData": { "base": null, "refs": { "CreateGameSessionInput$GameSessionData": "Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).
", - "CreateMatchmakingConfigurationInput$GameSessionData": "A set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.
", + "CreateMatchmakingConfigurationInput$GameSessionData": "A set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match. This parameter is not used if FlexMatchMode
is set to STANDALONE
.
Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).
", "GameSessionPlacement$GameSessionData": "Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).
", - "MatchmakingConfiguration$GameSessionData": "A set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.
", + "MatchmakingConfiguration$GameSessionData": "A set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match. This parameter is not used when FlexMatchMode
is set to STANDALONE
.
Set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session).
", - "UpdateMatchmakingConfigurationInput$GameSessionData": "A set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.
" + "UpdateMatchmakingConfigurationInput$GameSessionData": "A set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match. This parameter is not used if FlexMatchMode
is set to STANDALONE
.
Information about the matchmaking process that was used to create the game session. It is in JSON syntax, formatted as a string. In addition the matchmaking configuration used, it contains data on all players assigned to the match, including player attributes and team assignments. For more details on matchmaker data, see Match Data. Matchmaker data is useful when requesting match backfills, and is updated whenever new players are added during a successful backfill (see StartMatchBackfill).
", - "GameSessionPlacement$MatchmakerData": "Information on the matchmaking process for this game. Data is in JSON syntax, formatted as a string. It identifies the matchmaking configuration used to create the match, and contains data on all players assigned to the match, including player attributes and team assignments. For more details on matchmaker data, see Match Data.
" + "GameSession$MatchmakerData": "Information about the matchmaking process that was used to create the game session. It is in JSON syntax, formatted as a string. In addition the matchmaking configuration used, it contains data on all players assigned to the match, including player attributes and team assignments. For more details on matchmaker data, see Match Data. Matchmaker data is useful when requesting match backfills, and is updated whenever new players are added during a successful backfill (see StartMatchBackfill).
", + "GameSessionPlacement$MatchmakerData": "Information on the matchmaking process for this game. Data is in JSON syntax, formatted as a string. It identifies the matchmaking configuration used to create the match, and contains data on all players assigned to the match, including player attributes and team assignments. For more details on matchmaker data, see Match Data.
" } }, "MatchmakingAcceptanceTimeoutInteger": { "base": null, "refs": { - "CreateMatchmakingConfigurationInput$AcceptanceTimeoutSeconds": "The length of time (in seconds) to wait for players to accept a proposed match. If any player rejects the match or fails to accept before the timeout, the ticket continues to look for an acceptable match.
", - "MatchmakingConfiguration$AcceptanceTimeoutSeconds": "The length of time (in seconds) to wait for players to accept a proposed match. If any player rejects the match or fails to accept before the timeout, the ticket continues to look for an acceptable match.
", - "UpdateMatchmakingConfigurationInput$AcceptanceTimeoutSeconds": "The length of time (in seconds) to wait for players to accept a proposed match. If any player rejects the match or fails to accept before the timeout, the ticket continues to look for an acceptable match.
" + "CreateMatchmakingConfigurationInput$AcceptanceTimeoutSeconds": "The length of time (in seconds) to wait for players to accept a proposed match, if acceptance is required. If any player rejects the match or fails to accept before the timeout, the tickets are returned to the ticket pool and continue to be evaluated for an acceptable match.
", + "MatchmakingConfiguration$AcceptanceTimeoutSeconds": "The length of time (in seconds) to wait for players to accept a proposed match, if acceptance is required. If any player rejects the match or fails to accept before the timeout, the tickets are returned to the ticket pool and continue to be evaluated for an acceptable match.
", + "UpdateMatchmakingConfigurationInput$AcceptanceTimeoutSeconds": "The length of time (in seconds) to wait for players to accept a proposed match, if acceptance is required. If any player rejects the match or fails to accept before the timeout, the tickets are returned to the ticket pool and continue to be evaluated for an acceptable match.
" } }, "MatchmakingConfiguration": { @@ -1679,7 +1687,7 @@ "MatchmakingConfigurationArn": { "base": "Data type used for Matchmaking Configuration ARN.", "refs": { - "MatchmakingConfiguration$ConfigurationArn": "Amazon Resource Name (ARN) that is assigned to a GameLift matchmaking configuration resource and uniquely identifies it. ARNs are unique across all Regions. In a GameLift configuration ARN, the resource ID matches the Name value.
", + "MatchmakingConfiguration$ConfigurationArn": "Amazon Resource Name (ARN) that is assigned to a GameLift matchmaking configuration resource and uniquely identifies it. ARNs are unique across all Regions. In a GameLift configuration ARN, the resource ID matches the Name value.
", "MatchmakingTicket$ConfigurationArn": "The Amazon Resource Name (ARN) associated with the GameLift matchmaking configuration resource that is used with this ticket.
" } }, @@ -1743,7 +1751,7 @@ } }, "MatchmakingRuleSet": { - "base": "Set of rule statements, used with FlexMatch, that determine how to build your player matches. Each rule set describes a type of group to be created and defines the parameters for acceptable player matches. Rule sets are used in MatchmakingConfiguration objects.
A rule set may define the following elements for a match. For detailed information and examples showing how to construct a rule set, see Build a FlexMatch Rule Set.
Teams -- Required. A rule set must define one or multiple teams for the match and set minimum and maximum team sizes. For example, a rule set might describe a 4x4 match that requires all eight slots to be filled.
Player attributes -- Optional. These attributes specify a set of player characteristics to evaluate when looking for a match. Matchmaking requests that use a rule set with player attributes must provide the corresponding attribute values. For example, an attribute might specify a player's skill or level.
Rules -- Optional. Rules define how to evaluate potential players for a match based on player attributes. A rule might specify minimum requirements for individual players, teams, or entire matches. For example, a rule might require each player to meet a certain skill level, each team to have at least one player in a certain role, or the match to have a minimum average skill level. or may describe an entire group--such as all teams must be evenly matched or have at least one player in a certain role.
Expansions -- Optional. Expansions allow you to relax the rules after a period of time when no acceptable matches are found. This feature lets you balance getting players into games in a reasonable amount of time instead of making them wait indefinitely for the best possible match. For example, you might use an expansion to increase the maximum skill variance between players after 30 seconds.
Set of rule statements, used with FlexMatch, that determine how to build your player matches. Each rule set describes a type of group to be created and defines the parameters for acceptable player matches. Rule sets are used in MatchmakingConfiguration objects.
A rule set may define the following elements for a match. For detailed information and examples showing how to construct a rule set, see Build a FlexMatch Rule Set.
Teams -- Required. A rule set must define one or multiple teams for the match and set minimum and maximum team sizes. For example, a rule set might describe a 4x4 match that requires all eight slots to be filled.
Player attributes -- Optional. These attributes specify a set of player characteristics to evaluate when looking for a match. Matchmaking requests that use a rule set with player attributes must provide the corresponding attribute values. For example, an attribute might specify a player's skill or level.
Rules -- Optional. Rules define how to evaluate potential players for a match based on player attributes. A rule might specify minimum requirements for individual players, teams, or entire matches. For example, a rule might require each player to meet a certain skill level, each team to have at least one player in a certain role, or the match to have a minimum average skill level. or may describe an entire group--such as all teams must be evenly matched or have at least one player in a certain role.
Expansions -- Optional. Expansions allow you to relax the rules after a period of time when no acceptable matches are found. This feature lets you balance getting players into games in a reasonable amount of time instead of making them wait indefinitely for the best possible match. For example, you might use an expansion to increase the maximum skill variance between players after 30 seconds.
The newly created matchmaking rule set.
", "MatchmakingRuleSetList$member": null @@ -1840,9 +1848,9 @@ "AwsCredentials$SecretAccessKey": "Temporary secret key allowing access to the Amazon GameLift S3 account.
", "AwsCredentials$SessionToken": "Token used to associate a specific build ID with the files uploaded using these credentials.
", "ConflictException$Message": null, - "CreateFleetInput$InstanceRoleArn": "A unique identifier for an AWS IAM role that manages access to your AWS services. With an instance role ARN set, any application that runs on an instance in this fleet can assume the role, including install scripts, server processes, and daemons (background processes). Create a role or look up a role's ARN from the IAM dashboard in the AWS Management Console. Learn more about using on-box credentials for your game servers at Access external resources from a game server.
", + "CreateFleetInput$InstanceRoleArn": "A unique identifier for an AWS IAM role that manages access to your AWS services. Fleets with an instance role ARN allow applications that are running on the fleet's instances to assume the role. Learn more about using on-box credentials for your game servers at Access external resources from a game server. To call this operation with instance role ARN, you must have IAM PassRole permissions. See IAM policy examples for GameLift.
", "Event$Message": "Additional information related to the event.
", - "FleetAttributes$InstanceRoleArn": "A unique identifier for an AWS IAM role that manages access to your AWS services. With an instance role ARN set, any application that runs on an instance in this fleet can assume the role, including install scripts, server processes, and daemons (background processes). Create a role or look up a role's ARN from the IAM dashboard in the AWS Management Console. Learn more about using on-box credentials for your game servers at Access external resources from a game server.
", + "FleetAttributes$InstanceRoleArn": "A unique identifier for an AWS IAM role that manages access to your AWS services.
", "FleetCapacityExceededException$Message": null, "GameSessionFullException$Message": null, "IdempotentParameterMismatchException$Message": null, @@ -2101,7 +2109,7 @@ "base": null, "refs": { "MatchmakingTicket$Players": "A set of Player
objects, each representing a player to find matches for. Players are identified by a unique player ID and may include latency data for use during matchmaking. If the ticket is in status COMPLETED, the Player objects include the team the players were assigned to in the resulting match.
Match information on all players that are currently assigned to the game session. This information is used by the matchmaker to find new players and add them to the existing game.
PlayerID, PlayerAttributes, Team -- This information is maintained in the GameSession object, MatchmakerData property, for all players who are currently assigned to the game session. The matchmaker data is in JSON syntax, formatted as a string. For more details, see Match Data.
LatencyInMs -- If the matchmaker uses player latency, include a latency value, in milliseconds, for the Region that the game session is currently in. Do not include latency values for any other Region.
Match information on all players that are currently assigned to the game session. This information is used by the matchmaker to find new players and add them to the existing game.
PlayerID, PlayerAttributes, Team -- This information is maintained in the GameSession object, MatchmakerData property, for all players who are currently assigned to the game session. The matchmaker data is in JSON syntax, formatted as a string. For more details, see Match Data.
LatencyInMs -- If the matchmaker uses player latency, include a latency value, in milliseconds, for the Region that the game session is currently in. Do not include latency values for any other Region.
Information on each player to be matched. This information must include a player ID, and may contain player attributes and latency data to be used in the matchmaking process. After a successful match, Player objects contain the name of the team the player is assigned to.
Amazon Resource Name (ARN) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. These queues are used when placing game sessions for matches that are created with this matchmaking configuration. Queues can be located in any Region.
", - "MatchmakingConfiguration$GameSessionQueueArns": "Amazon Resource Name (ARN) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. GameLift uses the listed queues when placing game sessions for matches that are created with this matchmaking configuration. Queues can be located in any Region.
", - "UpdateMatchmakingConfigurationInput$GameSessionQueueArns": "Amazon Resource Name (ARN) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. These queues are used when placing game sessions for matches that are created with this matchmaking configuration. Queues can be located in any Region.
" + "CreateMatchmakingConfigurationInput$GameSessionQueueArns": "Amazon Resource Name (ARN) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. Queues can be located in any Region. Queues are used to start new GameLift-hosted game sessions for matches that are created with this matchmaking configuration. If FlexMatchMode
is set to STANDALONE
, do not set this parameter.
Amazon Resource Name (ARN) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. Queues can be located in any Region. Queues are used to start new GameLift-hosted game sessions for matches that are created with this matchmaking configuration. Thais property is not set when FlexMatchMode
is set to STANDALONE
.
Amazon Resource Name (ARN) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. Queues can be located in any Region. Queues are used to start new GameLift-hosted game sessions for matches that are created with this matchmaking configuration. If FlexMatchMode
is set to STANDALONE
, do not set this parameter.
The location in S3 where build or script files are stored for access by Amazon GameLift. This location is specified in CreateBuild, CreateScript, and UpdateScript requests.
", "refs": { - "CreateBuildInput$StorageLocation": "Information indicating where your game build files are stored. Use this parameter only when creating a build with files stored in an S3 bucket that you own. The storage location must specify an S3 bucket name and key. The location must also specify a role ARN that you set up to allow Amazon GameLift to access your S3 bucket. The S3 bucket and your new build must be in the same Region.
", + "CreateBuildInput$StorageLocation": "The location where your game build files are stored. Use this parameter only when creating a build using files that are stored in an S3 bucket that you own. Identify an S3 bucket name and key, which must in the same Region where you're creating a build. This parameter must also specify the ARN for an IAM role that you've set up to give Amazon GameLift access your S3 bucket. To call this operation with a storage location, you must have IAM PassRole permission. For more details on IAM roles and PassRole permissions, see Set up a role for GameLift access.
", "CreateBuildOutput$StorageLocation": "Amazon S3 location for your game build file, including bucket name and key.
", - "CreateScriptInput$StorageLocation": "The location of the Amazon S3 bucket where a zipped file containing your Realtime scripts is stored. The storage location must specify the Amazon S3 bucket name, the zip file name (the \"key\"), and a role ARN that allows Amazon GameLift to access the Amazon S3 storage location. The S3 bucket must be in the same Region where you want to create a new script. By default, Amazon GameLift uploads the latest version of the zip file; if you have S3 object versioning turned on, you can use the ObjectVersion
parameter to specify an earlier version.
The Amazon S3 location of your Realtime scripts. The storage location must specify the S3 bucket name, the zip file name (the \"key\"), and an IAM role ARN that allows Amazon GameLift to access the S3 storage location. The S3 bucket must be in the same Region where you are creating a new script. By default, Amazon GameLift uploads the latest version of the zip file; if you have S3 object versioning turned on, you can use the ObjectVersion
parameter to specify an earlier version. To call this operation with a storage location, you must have IAM PassRole permission. For more details on IAM roles and PassRole permissions, see Set up a role for GameLift access.
Amazon S3 path and key, identifying where the game build files are stored.
", "Script$StorageLocation": null, - "UpdateScriptInput$StorageLocation": "The location of the Amazon S3 bucket where a zipped file containing your Realtime scripts is stored. The storage location must specify the Amazon S3 bucket name, the zip file name (the \"key\"), and a role ARN that allows Amazon GameLift to access the Amazon S3 storage location. The S3 bucket must be in the same Region where you want to create a new script. By default, Amazon GameLift uploads the latest version of the zip file; if you have S3 object versioning turned on, you can use the ObjectVersion
parameter to specify an earlier version.
The Amazon S3 location of your Realtime scripts. The storage location must specify the S3 bucket name, the zip file name (the \"key\"), and an IAM role ARN that allows Amazon GameLift to access the S3 storage location. The S3 bucket must be in the same Region as the script you're updating. By default, Amazon GameLift uploads the latest version of the zip file; if you have S3 object versioning turned on, you can use the ObjectVersion
parameter to specify an earlier version. To call this operation with a storage location, you must have IAM PassRole permission. For more details on IAM roles and PassRole permissions, see Set up a role for GameLift access.
An SNS topic ARN that is set up to receive matchmaking notifications.
", "MatchmakingConfiguration$NotificationTarget": "An SNS topic ARN that is set up to receive matchmaking notifications.
", - "UpdateMatchmakingConfigurationInput$NotificationTarget": "An SNS topic ARN that is set up to receive matchmaking notifications. See Setting up Notifications for Matchmaking for more information.
" + "UpdateMatchmakingConfigurationInput$NotificationTarget": "An SNS topic ARN that is set up to receive matchmaking notifications. See Setting up Notifications for Matchmaking for more information.
" } }, "SortOrder": { @@ -2820,7 +2828,7 @@ "VpcSubnets": { "base": null, "refs": { - "CreateGameServerGroupInput$VpcSubnets": "A list of virtual private cloud (VPC) subnets to use with instances in the game server group. By default, all GameLift FleetIQ-supported Availability Zones are used. You can use this parameter to specify VPCs that you've set up. This property cannot be updated after the game server group is created, and the corresponding Auto Scaling group will always use the property value that is set with this request, even if the Auto Scaling group is updated directly
" + "CreateGameServerGroupInput$VpcSubnets": "A list of virtual private cloud (VPC) subnets to use with instances in the game server group. By default, all GameLift FleetIQ-supported Availability Zones are used. You can use this parameter to specify VPCs that you've set up. This property cannot be updated after the game server group is created, and the corresponding Auto Scaling group will always use the property value that is set with this request, even if the Auto Scaling group is updated directly.
" } }, "WeightedCapacity": { @@ -2835,7 +2843,7 @@ "CreateGameServerGroupInput$MinSize": "The minimum number of instances allowed in the EC2 Auto Scaling group. During automatic scaling events, GameLift FleetIQ and EC2 do not scale down the group below this minimum. In production, this value should be set to at least 1. After the Auto Scaling group is created, update this value directly in the Auto Scaling group using the AWS console or APIs.
", "CreateGameSessionInput$MaximumPlayerSessionCount": "The maximum number of players that can be connected simultaneously to the game session.
", "CreateGameSessionQueueInput$TimeoutInSeconds": "The maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a TIMED_OUT
status.
The number of player slots in a match to keep open for future players. For example, assume that the configuration's rule set specifies a match for a single 12-person team. If the additional player count is set to 2, only 10 players are initially selected for the match.
", + "CreateMatchmakingConfigurationInput$AdditionalPlayerCount": "The number of player slots in a match to keep open for future players. For example, assume that the configuration's rule set specifies a match for a single 12-person team. If the additional player count is set to 2, only 10 players are initially selected for the match. This parameter is not used if FlexMatchMode
is set to STANDALONE
.
Ideal number of active instances in the fleet.
", "EC2InstanceCounts$MINIMUM": "The minimum value allowed for the fleet's instance count.
", "EC2InstanceCounts$MAXIMUM": "The maximum value allowed for the fleet's instance count.
", @@ -2853,7 +2861,7 @@ "GameSession$MaximumPlayerSessionCount": "The maximum number of players that can be connected simultaneously to the game session.
", "GameSessionPlacement$MaximumPlayerSessionCount": "The maximum number of players that can be connected simultaneously to the game session.
", "GameSessionQueue$TimeoutInSeconds": "The maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a TIMED_OUT
status.
The number of player slots in a match to keep open for future players. For example, assume that the configuration's rule set specifies a match for a single 12-person team. If the additional player count is set to 2, only 10 players are initially selected for the match.
", + "MatchmakingConfiguration$AdditionalPlayerCount": "The number of player slots in a match to keep open for future players. For example, assume that the configuration's rule set specifies a match for a single 12-person team. If the additional player count is set to 2, only 10 players are initially selected for the match. This parameter is not used when FlexMatchMode
is set to STANDALONE
.
Average amount of time (in seconds) that players are currently waiting for a match. If there is not enough recent data, this property may be empty.
", "PlayerLatencyPolicy$MaximumIndividualPlayerLatencyMilliseconds": "The maximum latency value that is allowed for any player, in milliseconds. All policies must have a value set for this property.
", "PlayerLatencyPolicy$PolicyDurationSeconds": "The length of time, in seconds, that the policy is enforced while placing a new game session. A null value for this property means that the policy is enforced until the queue times out.
", @@ -2865,7 +2873,7 @@ "UpdateFleetCapacityInput$MaxSize": "The maximum value allowed for the fleet's instance count. Default if not set is 1.
", "UpdateGameSessionInput$MaximumPlayerSessionCount": "The maximum number of players that can be connected simultaneously to the game session.
", "UpdateGameSessionQueueInput$TimeoutInSeconds": "The maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a TIMED_OUT
status.
The number of player slots in a match to keep open for future players. For example, assume that the configuration's rule set specifies a match for a single 12-person team. If the additional player count is set to 2, only 10 players are initially selected for the match.
" + "UpdateMatchmakingConfigurationInput$AdditionalPlayerCount": "The number of player slots in a match to keep open for future players. For example, assume that the configuration's rule set specifies a match for a single 12-person team. If the additional player count is set to 2, only 10 players are initially selected for the match. This parameter is not used if FlexMatchMode
is set to STANDALONE
.
Creates a dashboard in an AWS IoT SiteWise Monitor project.
", "CreateGateway": "Creates a gateway, which is a virtual or edge device that delivers industrial data streams from local servers to AWS IoT SiteWise. For more information, see Ingesting data using a gateway in the AWS IoT SiteWise User Guide.
", "CreatePortal": "Creates a portal, which can contain projects and dashboards. AWS IoT SiteWise Monitor uses AWS SSO or IAM to authenticate portal users and manage user permissions.
Before you can sign in to a new portal, you must add at least one identity to that portal. For more information, see Adding or removing portal administrators in the AWS IoT SiteWise User Guide.
Creates a pre-signed URL to a portal. Use this operation to create URLs to portals that use AWS Identity and Access Management (IAM) to authenticate users. An IAM user with access to a portal can call this API to get a URL to that portal. The URL contains a session token that lets the IAM user access the portal.
", + "CreatePresignedPortalUrl": "Creates a pre-signed URL to a portal. Use this operation to create URLs to portals that use AWS Identity and Access Management (IAM) to authenticate users. An IAM user with access to a portal can call this API to get a URL to that portal. The URL contains an authentication token that lets the IAM user access the portal.
", "CreateProject": "Creates a project in the specified portal.
", "DeleteAccessPolicy": "Deletes an access policy that grants the specified identity access to the specified AWS IoT SiteWise Monitor resource. You can use this operation to revoke access to an AWS IoT SiteWise Monitor resource.
", "DeleteAsset": "Deletes an asset. This action can't be undone. For more information, see Deleting assets and models in the AWS IoT SiteWise User Guide.
You can't delete an asset that's associated to another asset. For more information, see DisassociateAssets.
Retrieves information about an asset model.
", "DescribeAssetProperty": "Retrieves information about an asset property.
When you call this operation for an attribute property, this response includes the default attribute value that you define in the asset model. If you update the default value in the model, this operation's response includes the new default value.
This operation doesn't return the value of the asset property. To get the value of an asset property, use GetAssetPropertyValue.
", "DescribeDashboard": "Retrieves information about a dashboard.
", + "DescribeDefaultEncryptionConfiguration": "Retrieves information about the default encryption configuration for the AWS account in the default or specified region. For more information, see Key management in the AWS IoT SiteWise User Guide.
", "DescribeGateway": "Retrieves information about a gateway.
", "DescribeGatewayCapabilityConfiguration": "Retrieves information about a gateway capability configuration. Each gateway capability defines data sources for a gateway. A capability configuration can contain multiple data source configurations. If you define OPC-UA sources for a gateway in the AWS IoT SiteWise console, all of your OPC-UA sources are stored in one capability configuration. To list all capability configurations for a gateway, use DescribeGateway.
", "DescribeLoggingOptions": "Retrieves the current AWS IoT SiteWise logging options.
", @@ -45,6 +46,7 @@ "ListProjectAssets": "Retrieves a paginated list of assets associated with an AWS IoT SiteWise Monitor project.
", "ListProjects": "Retrieves a paginated list of projects for an AWS IoT SiteWise Monitor portal.
", "ListTagsForResource": "Retrieves the list of tags for an AWS IoT SiteWise resource.
", + "PutDefaultEncryptionConfiguration": "Sets the default encryption configuration for the AWS account. For more information, see Key management in the AWS IoT SiteWise User Guide.
", "PutLoggingOptions": "Sets logging options for AWS IoT SiteWise.
", "TagResource": "Adds tags to an AWS IoT SiteWise resource. If a tag already exists for the resource, this operation updates the tag's value.
", "UntagResource": "Removes a tag from an AWS IoT SiteWise resource.
", @@ -77,6 +79,7 @@ "DescribeAssetModelResponse$assetModelArn": "The ARN of the asset model, which has the following format.
arn:${Partition}:iotsitewise:${Region}:${Account}:asset-model/${AssetModelId}
The ARN of the asset, which has the following format.
arn:${Partition}:iotsitewise:${Region}:${Account}:asset/${AssetId}
The ARN of the dashboard, which has the following format.
arn:${Partition}:iotsitewise:${Region}:${Account}:dashboard/${DashboardId}
The key ARN of the customer managed customer master key (CMK) used for AWS KMS encryption if you use KMS_BASED_ENCRYPTION
.
The ARN of the gateway, which has the following format.
arn:${Partition}:iotsitewise:${Region}:${Account}:gateway/${GatewayId}
The ARN of the portal, which has the following format.
arn:${Partition}:iotsitewise:${Region}:${Account}:portal/${PortalId}
The ARN of the service role that allows the portal's users to access your AWS IoT SiteWise resources on your behalf. For more information, see Using service roles for AWS IoT SiteWise Monitor in the AWS IoT SiteWise User Guide.
", @@ -85,6 +88,7 @@ "IAMUserIdentity$arn": "The ARN of the IAM user. IAM users must have the iotsitewise:CreatePresignedPortalUrl
permission to sign in to the portal. For more information, see IAM ARNs in the IAM User Guide.
If you delete the IAM user, access policies that contain this identity include an empty arn
. You can delete the access policy for the IAM user that no longer exists.
The ARN of the IAM user. For more information, see IAM ARNs in the IAM User Guide. This parameter is required if you specify IAM
for identityType
.
The ARN of the service role that allows the portal's users to access your AWS IoT SiteWise resources on your behalf. For more information, see Using service roles for AWS IoT SiteWise Monitor in the AWS IoT SiteWise User Guide.
", + "PutDefaultEncryptionConfigurationResponse$kmsKeyArn": "The Key ARN of the AWS KMS CMK used for AWS KMS encryption if you use KMS_BASED_ENCRYPTION
.
The ARN of a service role that allows the portal's users to access your AWS IoT SiteWise resources on your behalf. For more information, see Using service roles for AWS IoT SiteWise Monitor in the AWS IoT SiteWise User Guide.
" } }, @@ -492,6 +496,25 @@ "UpdateProjectRequest$clientToken": "A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.
" } }, + "ConfigurationErrorDetails": { + "base": "", + "refs": { + "ConfigurationStatus$error": "" + } + }, + "ConfigurationState": { + "base": null, + "refs": { + "ConfigurationStatus$state": "" + } + }, + "ConfigurationStatus": { + "base": "", + "refs": { + "DescribeDefaultEncryptionConfigurationResponse$configurationStatus": "The status of the account configuration. This contains the ConfigurationState
. If there's an error, it also contains the ErrorDetails
.
The status of the account configuration. This contains the ConfigurationState
. If there is an error, it also contains the ErrorDetails
.
Your request has conflicting operations. This can occur if you're trying to perform more than one operation on the same resource at the same time.
", "refs": { @@ -718,6 +741,16 @@ "refs": { } }, + "DescribeDefaultEncryptionConfigurationRequest": { + "base": null, + "refs": { + } + }, + "DescribeDefaultEncryptionConfigurationResponse": { + "base": null, + "refs": { + } + }, "DescribeGatewayCapabilityConfigurationRequest": { "base": null, "refs": { @@ -802,6 +835,14 @@ "UpdatePortalRequest$portalContactEmail": "The AWS administrator's contact email address.
" } }, + "EncryptionType": { + "base": null, + "refs": { + "DescribeDefaultEncryptionConfigurationResponse$encryptionType": "The type of encryption used for the encryption configuration.
", + "PutDefaultEncryptionConfigurationRequest$encryptionType": "The type of encryption used for the encryption configuration.
", + "PutDefaultEncryptionConfigurationResponse$encryptionType": "The type of encryption used for the encryption configuration.
" + } + }, "EntryId": { "base": null, "refs": { @@ -812,6 +853,7 @@ "ErrorCode": { "base": null, "refs": { + "ConfigurationErrorDetails$code": "", "ErrorDetails$code": "The error code.
" } }, @@ -826,6 +868,7 @@ "base": null, "refs": { "BatchPutAssetPropertyError$errorMessage": "The associated error message.
", + "ConfigurationErrorDetails$message": "", "ConflictingOperationException$message": null, "ErrorDetails$message": "The error message.
", "InternalFailureException$message": null, @@ -1123,6 +1166,12 @@ "refs": { } }, + "KmsKeyId": { + "base": null, + "refs": { + "PutDefaultEncryptionConfigurationRequest$kmsKeyId": "The Key ID of the customer managed customer master key (CMK) used for AWS KMS encryption. This is required if you use KMS_BASED_ENCRYPTION
.
You've reached the limit for a resource. For example, this can occur if you're trying to associate more than the allowed number of child assets or attempting to create more than the allowed number of properties for an asset model.
For more information, see Quotas in the AWS IoT SiteWise User Guide.
", "refs": { @@ -1540,6 +1589,16 @@ "PutAssetPropertyValueEntries$member": null } }, + "PutDefaultEncryptionConfigurationRequest": { + "base": null, + "refs": { + } + }, + "PutDefaultEncryptionConfigurationResponse": { + "base": null, + "refs": { + } + }, "PutLoggingOptionsRequest": { "base": null, "refs": { @@ -1624,7 +1683,7 @@ "SessionDurationSeconds": { "base": null, "refs": { - "CreatePresignedPortalUrlRequest$sessionDurationSeconds": "The duration (in seconds) for which the session at the URL is valid.
Default: 900 seconds (15 minutes)
" + "CreatePresignedPortalUrlRequest$sessionDurationSeconds": "The duration (in seconds) for which the session at the URL is valid.
Default: 43,200 seconds (12 hours)
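Given the much longer default above, callers that want short-lived portal sessions may prefer to pass the duration explicitly. A hedged aws-sdk-go sketch (the portal ID is a placeholder):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iotsitewise"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := iotsitewise.New(sess)

	// Request a one-hour session explicitly rather than relying on the
	// default, which this release changes from 900s to 43,200s.
	out, err := svc.CreatePresignedPortalUrl(&iotsitewise.CreatePresignedPortalUrlInput{
		PortalId:               aws.String("a1b2c3d4-5678-90ab-cdef-11111EXAMPLE"), // placeholder
		SessionDurationSeconds: aws.Int64(3600),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.PresignedPortalUrl))
}
```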
" } }, "TagKey": { @@ -1858,7 +1917,7 @@ "base": null, "refs": { "CreatePortalResponse$portalStartUrl": "The URL for the AWS IoT SiteWise Monitor portal. You can use this URL to access portals that use AWS SSO for authentication. For portals that use IAM for authentication, you must use the CreatePresignedPortalUrl operation to create a URL that you can use to access the portal.
", - "CreatePresignedPortalUrlResponse$presignedPortalUrl": "The pre-signed URL to the portal. The URL contains the portal ID and a session token that lets you access the portal. The URL has the following format.
https://<portal-id>.app.iotsitewise.aws/auth?token=<encrypted-token>
The pre-signed URL to the portal. The URL contains the portal ID and an authentication token that lets you access the portal. The URL has the following format.
https://<portal-id>.app.iotsitewise.aws/iam?token=<encrypted-token>
The URL for the AWS IoT SiteWise Monitor portal. You can use this URL to access portals that use AWS SSO for authentication. For portals that use IAM for authentication, you must use the CreatePresignedPortalUrl operation to create a URL that you can use to access the portal.
", "ImageLocation$url": "The URL where the image is available. The URL is valid for 15 minutes so that you can view and download the image
", "PortalSummary$startUrl": "The URL for the AWS IoT SiteWise Monitor portal. You can use this URL to access portals that use AWS SSO for authentication. For portals that use IAM for authentication, you must use the CreatePresignedPortalUrl operation to create a URL that you can use to access the portal.
" diff --git a/models/apis/lex-models/2017-04-19/api-2.json b/models/apis/lex-models/2017-04-19/api-2.json index f5cc8e83f13..90116bc0ef0 100644 --- a/models/apis/lex-models/2017-04-19/api-2.json +++ b/models/apis/lex-models/2017-04-19/api-2.json @@ -1965,6 +1965,7 @@ "en-AU", "en-GB", "en-US", + "es-419", "es-ES", "es-US", "fr-FR", diff --git a/models/apis/mediaconvert/2017-08-29/api-2.json b/models/apis/mediaconvert/2017-08-29/api-2.json index ab03c69f424..663d0a5a3b5 100644 --- a/models/apis/mediaconvert/2017-08-29/api-2.json +++ b/models/apis/mediaconvert/2017-08-29/api-2.json @@ -2233,6 +2233,13 @@ "DISABLED" ] }, + "CmfcAudioDuration": { + "type": "string", + "enum": [ + "DEFAULT_CODEC_DURATION", + "MATCH_VIDEO_DURATION" + ] + }, "CmfcScte35Esam": { "type": "string", "enum": [ @@ -2250,6 +2257,10 @@ "CmfcSettings": { "type": "structure", "members": { + "AudioDuration": { + "shape": "CmfcAudioDuration", + "locationName": "audioDuration" + }, "Scte35Esam": { "shape": "CmfcScte35Esam", "locationName": "scte35Esam" @@ -2668,6 +2679,10 @@ "shape": "__integerMin0Max2147483647", "locationName": "minBufferTime" }, + "MinFinalSegmentLength": { + "shape": "__doubleMin0Max2147483647", + "locationName": "minFinalSegmentLength" + }, "MpdProfile": { "shape": "DashIsoMpdProfile", "locationName": "mpdProfile" @@ -4992,7 +5007,7 @@ "locationName": "denoiseFilter" }, "FileInput": { - "shape": "__stringPatternS3MM2PPMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLL", + "shape": "__stringPatternS3MM2PPMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLOOGGGGaAHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLOOGGGGaA", "locationName": "fileInput" }, "FilterEnable": { @@ -5959,6 +5974,13 @@ "ATSC" ] }, + "M2tsAudioDuration": { + "type": "string", + "enum": [ + "DEFAULT_CODEC_DURATION", + "MATCH_VIDEO_DURATION" + ] + }, "M2tsBufferModel": { "type": "string", "enum": [ @@ -6056,6 +6078,10 @@ "shape": "M2tsAudioBufferModel", "locationName": "audioBufferModel" }, + "AudioDuration": { + "shape": "M2tsAudioDuration", + "locationName": "audioDuration" + }, "AudioFramesPerPes": { "shape": "__integerMin0Max2147483647", "locationName": "audioFramesPerPes" @@ -6198,6 +6224,13 @@ } } }, + "M3u8AudioDuration": { + "type": "string", + "enum": [ + "DEFAULT_CODEC_DURATION", + "MATCH_VIDEO_DURATION" + ] + }, "M3u8NielsenId3": { "type": "string", "enum": [ @@ -6222,6 +6255,10 @@ "M3u8Settings": { "type": "structure", "members": { + "AudioDuration": { + "shape": "M3u8AudioDuration", + "locationName": "audioDuration" + }, "AudioFramesPerPes": { "shape": "__integerMin0Max2147483647", "locationName": 
"audioFramesPerPes" @@ -6490,6 +6527,10 @@ "Mp4Settings": { "type": "structure", "members": { + "AudioDuration": { + "shape": "CmfcAudioDuration", + "locationName": "audioDuration" + }, "CslgAtom": { "shape": "Mp4CslgAtom", "locationName": "cslgAtom" @@ -6512,6 +6553,20 @@ } } }, + "MpdAccessibilityCaptionHints": { + "type": "string", + "enum": [ + "INCLUDE", + "EXCLUDE" + ] + }, + "MpdAudioDuration": { + "type": "string", + "enum": [ + "DEFAULT_CODEC_DURATION", + "MATCH_VIDEO_DURATION" + ] + }, "MpdCaptionContainerType": { "type": "string", "enum": [ @@ -6536,6 +6591,14 @@ "MpdSettings": { "type": "structure", "members": { + "AccessibilityCaptionHints": { + "shape": "MpdAccessibilityCaptionHints", + "locationName": "accessibilityCaptionHints" + }, + "AudioDuration": { + "shape": "MpdAudioDuration", + "locationName": "audioDuration" + }, "CaptionContainerType": { "shape": "MpdCaptionContainerType", "locationName": "captionContainerType" @@ -9600,9 +9663,9 @@ "type": "string", "pattern": "^s3:\\/\\/.*\\/(ASSETMAP.xml)?$" }, - "__stringPatternS3MM2PPMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLL": { + "__stringPatternS3MM2PPMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLOOGGGGaAHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLOOGGGGaA": { "type": "string", - "pattern": "^((s3://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[pP]|[mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][kK][aA]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[wW][eE][bB][mM]|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[xX][mM][lL]))))|(https?://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][kK][aA]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[wW][eE][bB][mM]|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[xX][mM][lL])))(\\?([^&=]+=[^&]+&)*[^&=]+=[^&]+)?))$" + "pattern": 
"^((s3://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[pP]|[mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][kK][aA]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[wW][eE][bB][mM]|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[xX][mM][lL]|[oO][gG][gGaA]))))|(https?://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][kK][aA]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[wW][eE][bB][mM]|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[xX][mM][lL]|[oO][gG][gGaA])))(\\?([^&=]+=[^&]+&)*[^&=]+=[^&]+)?))$" }, "__stringPatternS3MM2PPWWEEBBMMMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEEHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEE": { "type": "string", diff --git a/models/apis/mediaconvert/2017-08-29/docs-2.json b/models/apis/mediaconvert/2017-08-29/docs-2.json index 12bef398edf..d640333963b 100644 --- a/models/apis/mediaconvert/2017-08-29/docs-2.json +++ b/models/apis/mediaconvert/2017-08-29/docs-2.json @@ -512,9 +512,9 @@ } }, "CmafClientCache": { - "base": "When set to ENABLED, sets #EXT-X-ALLOW-CACHE:no tag, which prevents client from saving media segments for later replay.", + "base": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled (ENABLED) and control caching in your video distribution set up. For example, use the Cache-Control http header.", "refs": { - "CmafGroupSettings$ClientCache": "When set to ENABLED, sets #EXT-X-ALLOW-CACHE:no tag, which prevents client from saving media segments for later replay." + "CmafGroupSettings$ClientCache": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled (ENABLED) and control caching in your video distribution set up. For example, use the Cache-Control http header." } }, "CmafCodecSpecification": { @@ -601,6 +601,13 @@ "CmafGroupSettings$WriteSegmentTimelineInRepresentation": "When you enable Precise segment duration in DASH manifests (writeSegmentTimelineInRepresentation), your DASH manifest shows precise segment durations. The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. 
When this feature isn't enabled, the segment durations in your DASH manifest are approximate. The segment duration information appears in the duration attribute of the SegmentTemplate element." } }, + "CmfcAudioDuration": { + "base": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", + "refs": { + "CmfcSettings$AudioDuration": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", + "Mp4Settings$AudioDuration": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." + } + }, "CmfcScte35Esam": { "base": "Use this setting only when you specify SCTE-35 markers from ESAM. Choose INSERT to put SCTE-35 markers in this output at the insertion points that you specify in an ESAM XML document. 
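A sketch of how the new audio-duration control might be set on an MP4 output in aws-sdk-go; MATCH_VIDEO_DURATION is the enum value defined above, and the surrounding job settings are omitted:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// Pad or trim the output audio stream so its duration tracks the video
	// stream, for downstream repackaging workflows that are sensitive to
	// small audio/video duration differences.
	cs := &mediaconvert.ContainerSettings{
		Container: aws.String(mediaconvert.ContainerTypeMp4),
		Mp4Settings: &mediaconvert.Mp4Settings{
			AudioDuration: aws.String("MATCH_VIDEO_DURATION"),
		},
	}
	fmt.Println(cs)
}
```

The same enum (DEFAULT_CODEC_DURATION or MATCH_VIDEO_DURATION) is exposed on the CMFC, M2TS, M3U8, and MPD container settings added in this release.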
Provide the document in the setting SCC XML (sccXml).", "refs": { @@ -1602,9 +1609,9 @@ } }, "HlsClientCache": { - "base": "When set to ENABLED, sets #EXT-X-ALLOW-CACHE:no tag, which prevents client from saving media segments for later replay.", + "base": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled (ENABLED) and control caching in your video distribution set up. For example, use the Cache-Control http header.", "refs": { - "HlsGroupSettings$ClientCache": "When set to ENABLED, sets #EXT-X-ALLOW-CACHE:no tag, which prevents client from saving media segments for later replay." + "HlsGroupSettings$ClientCache": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled (ENABLED) and control caching in your video distribution set up. For example, use the Cache-Control http header." } }, "HlsCodecSpecification": { @@ -1948,6 +1955,12 @@ "M2tsSettings$AudioBufferModel": "Selects between the DVB and ATSC buffer models for Dolby Digital audio." } }, + "M2tsAudioDuration": { + "base": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", + "refs": { + "M2tsSettings$AudioDuration": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." + } + }, "M2tsBufferModel": { "base": "Controls what buffer model to use for accurate interleaving. If set to MULTIPLEX, use multiplex buffer model. If set to NONE, this can lead to lower latency, but low-memory devices may not be able to play back the stream without interruptions.", "refs": { @@ -2026,6 +2039,12 @@ "ContainerSettings$M2tsSettings": "MPEG-2 TS container settings. 
These apply to outputs in a File output group when the output's container (ContainerType) is MPEG-2 Transport Stream (M2TS). In these assets, data is organized by the program map table (PMT). Each transport stream program contains subsets of data, including audio, video, and metadata. Each of these subsets of data has a numerical label called a packet identifier (PID). Each transport stream program corresponds to one MediaConvert output. The PMT lists the types of data in a program along with their PID. Downstream systems and players use the program map table to look up the PID for each type of data it accesses and then uses the PIDs to locate specific data within the asset." } }, + "M3u8AudioDuration": { + "base": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", + "refs": { + "M3u8Settings$AudioDuration": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." + } + }, "M3u8NielsenId3": { "base": "If INSERT, Nielsen inaudible tones for media tracking will be detected in the input audio and an equivalent ID3 tag will be inserted in the output.", "refs": { @@ -2159,6 +2178,18 @@ "ContainerSettings$Mp4Settings": "Settings for MP4 container. You can create audio-only AAC outputs with this container." } }, + "MpdAccessibilityCaptionHints": { + "base": "Optional. Choose Include (INCLUDE) to have MediaConvert mark up your DASH manifest with
diff --git a/models/apis/mwaa/2020-07-01/docs-2.json b/models/apis/mwaa/2020-07-01/docs-2.json
new file mode 100644
--- /dev/null
+++ b/models/apis/mwaa/2020-07-01/docs-2.json
+{ + "version": "2.0", + "service": "This section contains the Amazon Managed Workflows for Apache Airflow (MWAA) API reference documentation. For more information, see What Is Amazon MWAA?.
", + "operations": { + "CreateCliToken": "Create a CLI token to use Airflow CLI.
", + "CreateEnvironment": "JSON blob that describes the environment to create.
", + "CreateWebLoginToken": "Create a JWT token to be used to login to Airflow Web UI with claims based Authentication.
", + "DeleteEnvironment": "Delete an existing environment.
", + "GetEnvironment": "Get details of an existing environment.
", + "ListEnvironments": "List Amazon MWAA Environments.
", + "ListTagsForResource": "List the tags for MWAA environments.
", + "PublishMetrics": "An operation for publishing metrics from the customers to the Ops plane.
", + "TagResource": "Add tag to the MWAA environments.
", + "UntagResource": "Remove a tag from the MWAA environments.
", + "UpdateEnvironment": "Update an MWAA environment.
" + }, + "shapes": { + "AccessDeniedException": { + "base": "Access to the Airflow Web UI or CLI has been Denied. Please follow the MWAA user guide to setup permissions to access the Web UI and CLI functionality.
", + "refs": { + } + }, + "AirflowConfigurationOptions": { + "base": null, + "refs": { + "Environment$AirflowConfigurationOptions": "The Airflow Configuration Options of the Amazon MWAA Environment.
" + } + }, + "AirflowVersion": { + "base": null, + "refs": { + "CreateEnvironmentInput$AirflowVersion": "The Apache Airflow version you want to use for your environment.
", + "Environment$AirflowVersion": "The AirflowV ersion of the Amazon MWAA Environment.
", + "UpdateEnvironmentInput$AirflowVersion": "The Airflow Version to update of your Amazon MWAA environment.
" + } + }, + "CloudWatchLogGroupArn": { + "base": null, + "refs": { + "ModuleLoggingConfiguration$CloudWatchLogGroupArn": "Provides the ARN for the CloudWatch group where the logs will be published.
" + } + }, + "ConfigKey": { + "base": null, + "refs": { + "AirflowConfigurationOptions$key": null, + "SyntheticCreateEnvironmentInputAirflowConfigurationOptions$key": null, + "SyntheticUpdateEnvironmentInputAirflowConfigurationOptions$key": null + } + }, + "ConfigValue": { + "base": null, + "refs": { + "AirflowConfigurationOptions$value": null, + "SyntheticCreateEnvironmentInputAirflowConfigurationOptions$value": null, + "SyntheticUpdateEnvironmentInputAirflowConfigurationOptions$value": null + } + }, + "CreateCliTokenRequest": { + "base": null, + "refs": { + } + }, + "CreateCliTokenResponse": { + "base": null, + "refs": { + } + }, + "CreateEnvironmentInput": { + "base": "This section contains the Amazon Managed Workflows for Apache Airflow (MWAA) API reference documentation to create an environment. For more information, see Get started with Amazon Managed Workflows for Apache Airflow.
", + "refs": { + } + }, + "CreateEnvironmentOutput": { + "base": null, + "refs": { + } + }, + "CreateWebLoginTokenRequest": { + "base": null, + "refs": { + } + }, + "CreateWebLoginTokenResponse": { + "base": null, + "refs": { + } + }, + "CreatedAt": { + "base": null, + "refs": { + "Environment$CreatedAt": "The Created At date of the Amazon MWAA Environment.
" + } + }, + "DeleteEnvironmentInput": { + "base": null, + "refs": { + } + }, + "DeleteEnvironmentOutput": { + "base": null, + "refs": { + } + }, + "Dimension": { + "base": "Internal only API.
", + "refs": { + "Dimensions$member": null + } + }, + "Dimensions": { + "base": null, + "refs": { + "MetricDatum$Dimensions": "Internal only API.
" + } + }, + "Double": { + "base": null, + "refs": { + "MetricDatum$Value": "Internal only API.
", + "StatisticSet$Maximum": "Internal only API.
", + "StatisticSet$Minimum": "Internal only API.
", + "StatisticSet$Sum": "Internal only API.
" + } + }, + "Environment": { + "base": "An Amazon MWAA environment.
", + "refs": { + "GetEnvironmentOutput$Environment": "A JSON blob with environment details.
" + } + }, + "EnvironmentArn": { + "base": null, + "refs": { + "CreateEnvironmentOutput$Arn": "The resulting Amazon MWAA envirnonment ARN.
", + "Environment$Arn": "The ARN of the Amazon MWAA Environment.
", + "ListTagsForResourceInput$ResourceArn": "The ARN of the MWAA environment.
", + "TagResourceInput$ResourceArn": "The tag resource ARN of the MWAA environments.
", + "UntagResourceInput$ResourceArn": "The tag resource ARN of the MWAA environments.
", + "UpdateEnvironmentOutput$Arn": "The ARN to update of your Amazon MWAA environment.
" + } + }, + "EnvironmentClass": { + "base": null, + "refs": { + "CreateEnvironmentInput$EnvironmentClass": "The environment class you want to use for your environment. The environment class determines the size of the containers and database used for your Apache Airflow services.
", + "Environment$EnvironmentClass": "The Environment Class (size) of the Amazon MWAA Environment.
", + "UpdateEnvironmentInput$EnvironmentClass": "The Environment Class to update of your Amazon MWAA environment.
" + } + }, + "EnvironmentList": { + "base": null, + "refs": { + "ListEnvironmentsOutput$Environments": "The list of Amazon MWAA Environments.
" + } + }, + "EnvironmentName": { + "base": null, + "refs": { + "CreateCliTokenRequest$Name": "Create a CLI token request for a MWAA environment.
", + "CreateEnvironmentInput$Name": "The name of your MWAA environment.
", + "CreateWebLoginTokenRequest$Name": "Create an Airflow Web UI login token request for a MWAA environment.
", + "DeleteEnvironmentInput$Name": "The name of the environment to delete.
", + "Environment$Name": "The name of the Amazon MWAA Environment.
", + "EnvironmentList$member": null, + "GetEnvironmentInput$Name": "The name of the environment to retrieve.
", + "PublishMetricsInput$EnvironmentName": "Publishes environment metric data to Amazon CloudWatch.
", + "UpdateEnvironmentInput$Name": "The name of your Amazon MWAA environment that you wish to update.
" + } + }, + "EnvironmentStatus": { + "base": null, + "refs": { + "Environment$Status": "The status of the Amazon MWAA Environment.
" + } + }, + "ErrorCode": { + "base": null, + "refs": { + "UpdateError$ErrorCode": "Error code of update.
" + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "UpdateError$ErrorMessage": "Error message of update.
" + } + }, + "GetEnvironmentInput": { + "base": null, + "refs": { + } + }, + "GetEnvironmentOutput": { + "base": null, + "refs": { + } + }, + "Hostname": { + "base": null, + "refs": { + "CreateCliTokenResponse$WebServerHostname": "Create an Airflow CLI login token response for the provided webserver hostname.
", + "CreateWebLoginTokenResponse$WebServerHostname": "Create an Airflow Web UI login token response for the provided webserver hostname.
" + } + }, + "IamRoleArn": { + "base": null, + "refs": { + "CreateEnvironmentInput$ExecutionRoleArn": "The Amazon Resource Name (ARN) of the execution role for your environment. An execution role is an AWS Identity and Access Management (IAM) role that grants MWAA permission to access AWS services and resources used by your environment. For example, arn:aws:iam::123456789:role/my-execution-role
. For more information, see Managing access to Amazon Managed Workflows for Apache Airflow.
The Execution Role ARN of the Amazon MWAA Environment.
", + "Environment$ServiceRoleArn": "The Service Role ARN of the Amazon MWAA Environment.
", + "UpdateEnvironmentInput$ExecutionRoleArn": "The Executio Role ARN to update of your Amazon MWAA environment.
" + } + }, + "Integer": { + "base": null, + "refs": { + "StatisticSet$SampleCount": "Internal only API.
" + } + }, + "InternalServerException": { + "base": "InternalServerException: An internal error has occurred.
", + "refs": { + } + }, + "KmsKey": { + "base": null, + "refs": { + "CreateEnvironmentInput$KmsKey": "The AWS Key Management Service (KMS) key to encrypt and decrypt the data in your environment. You can use an AWS KMS key managed by MWAA, or a custom KMS key (advanced). For more information, see Customer master keys (CMKs) in the AWS KMS developer guide.
", + "Environment$KmsKey": "The Kms Key of the Amazon MWAA Environment.
" + } + }, + "LastUpdate": { + "base": "Last update information for the environment.
", + "refs": { + "Environment$LastUpdate": null + } + }, + "ListEnvironmentsInput": { + "base": null, + "refs": { + } + }, + "ListEnvironmentsInputMaxResultsInteger": { + "base": null, + "refs": { + "ListEnvironmentsInput$MaxResults": "The maximum results when listing MWAA environments.
" + } + }, + "ListEnvironmentsOutput": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceInput": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceOutput": { + "base": null, + "refs": { + } + }, + "LoggingConfiguration": { + "base": "The Logging Configuration of your Amazon MWAA environment.
", + "refs": { + "Environment$LoggingConfiguration": "The Logging Configuration of the Amazon MWAA Environment.
" + } + }, + "LoggingConfigurationInput": { + "base": "The Logging Configuration of your Amazon MWAA environment.
", + "refs": { + "CreateEnvironmentInput$LoggingConfiguration": "The Apache Airflow logs you want to send to Amazon CloudWatch Logs.
", + "UpdateEnvironmentInput$LoggingConfiguration": "The Logging Configuration to update of your Amazon MWAA environment.
" + } + }, + "LoggingEnabled": { + "base": null, + "refs": { + "ModuleLoggingConfiguration$Enabled": "Defines that the logging module is enabled.
", + "ModuleLoggingConfigurationInput$Enabled": "Defines that the logging module is enabled.
" + } + }, + "LoggingLevel": { + "base": null, + "refs": { + "ModuleLoggingConfiguration$LogLevel": "Defines the log level, which can be CRITICAL, ERROR, WARNING, or INFO.
", + "ModuleLoggingConfigurationInput$LogLevel": "Defines the log level, which can be CRITICAL, ERROR, WARNING, or INFO.
" + } + }, + "MaxWorkers": { + "base": null, + "refs": { + "CreateEnvironmentInput$MaxWorkers": "The maximum number of workers that you want to run in your environment. MWAA scales the number of Apache Airflow workers and the Fargate containers that run your tasks up to the number you specify in this field. When there are no more tasks running, and no more in the queue, MWAA disposes of the extra containers leaving the one worker that is included with your environment.
", + "Environment$MaxWorkers": "The Maximum Workers of the Amazon MWAA Environment.
", + "UpdateEnvironmentInput$MaxWorkers": "The Maximum Workers to update of your Amazon MWAA environment.
" + } + }, + "MetricData": { + "base": null, + "refs": { + "PublishMetricsInput$MetricData": "Publishes metric data points to Amazon CloudWatch. CloudWatch associates the data points with the specified metrica.
" + } + }, + "MetricDatum": { + "base": "Internal only API.
", + "refs": { + "MetricData$member": null + } + }, + "ModuleLoggingConfiguration": { + "base": "A JSON blob that provides configuration to use for logging with respect to the various Apache Airflow services: DagProcessingLogs, SchedulerLogs, TaskLogs, WebserverLogs, and WorkerLogs.
", + "refs": { + "LoggingConfiguration$DagProcessingLogs": null, + "LoggingConfiguration$SchedulerLogs": null, + "LoggingConfiguration$TaskLogs": null, + "LoggingConfiguration$WebserverLogs": null, + "LoggingConfiguration$WorkerLogs": null + } + }, + "ModuleLoggingConfigurationInput": { + "base": "A JSON blob that provides configuration to use for logging with respect to the various Apache Airflow services: DagProcessingLogs, SchedulerLogs, TaskLogs, WebserverLogs, and WorkerLogs.
", + "refs": { + "LoggingConfigurationInput$DagProcessingLogs": null, + "LoggingConfigurationInput$SchedulerLogs": null, + "LoggingConfigurationInput$TaskLogs": null, + "LoggingConfigurationInput$WebserverLogs": null, + "LoggingConfigurationInput$WorkerLogs": null + } + }, + "NetworkConfiguration": { + "base": "Provide the security group and subnet IDs for the workers and scheduler.
", + "refs": { + "CreateEnvironmentInput$NetworkConfiguration": "The VPC networking components you want to use for your environment. At least two private subnet identifiers and one VPC security group identifier are required to create an environment. For more information, see Creating the VPC network for a MWAA environment.
", + "Environment$NetworkConfiguration": null + } + }, + "NextToken": { + "base": null, + "refs": { + "ListEnvironmentsInput$NextToken": "The Next Token when listing MWAA environments.
", + "ListEnvironmentsOutput$NextToken": "The Next Token when listing MWAA environments.
" + } + }, + "PublishMetricsInput": { + "base": null, + "refs": { + } + }, + "PublishMetricsOutput": { + "base": null, + "refs": { + } + }, + "RelativePath": { + "base": null, + "refs": { + "CreateEnvironmentInput$DagS3Path": "The relative path to the DAG folder on your Amazon S3 storage bucket. For example, dags
. For more information, see Importing DAGs on Amazon MWAA.
The relative path to the plugins.zip
file on your Amazon S3 storage bucket. For example, plugins.zip
. If a relative path is provided in the request, then PluginsS3ObjectVersion
is required. For more information, see Importing DAGs on Amazon MWAA.
The relative path to the requirements.txt
file on your Amazon S3 storage bucket. For example, requirements.txt
. If a relative path is provided in the request, then RequirementsS3ObjectVersion
is required. For more information, see Importing DAGs on Amazon MWAA.
The Dags S3 Path of the Amazon MWAA Environment.
", + "Environment$PluginsS3Path": "The Plugins.zip S3 Path of the Amazon MWAA Environment.
", + "Environment$RequirementsS3Path": "The Requirement.txt S3 Path of the Amazon MWAA Environment.
", + "UpdateEnvironmentInput$DagS3Path": "The Dags folder S3 Path to update of your Amazon MWAA environment.
", + "UpdateEnvironmentInput$PluginsS3Path": "The Plugins.zip S3 Path to update of your Amazon MWAA environment.
", + "UpdateEnvironmentInput$RequirementsS3Path": "The Requirements.txt S3 Path to update of your Amazon MWAA environment.
" + } + }, + "ResourceNotFoundException": { + "base": "ResourceNotFoundException: The resource is not available.
", + "refs": { + } + }, + "S3BucketArn": { + "base": null, + "refs": { + "CreateEnvironmentInput$SourceBucketArn": "The Amazon Resource Name (ARN) of your Amazon S3 storage bucket. For example, arn:aws:s3:::airflow-mybucketname
.
The Source S3 Bucket ARN of the Amazon MWAA Environment.
", + "UpdateEnvironmentInput$SourceBucketArn": "The S3 Source Bucket ARN to update of your Amazon MWAA environment.
" + } + }, + "S3ObjectVersion": { + "base": null, + "refs": { + "CreateEnvironmentInput$PluginsS3ObjectVersion": "The plugins.zip
file version you want to use.
The requirements.txt
file version you want to use.
The Plugins.zip S3 Object Version of the Amazon MWAA Environment.
", + "Environment$RequirementsS3ObjectVersion": "The Requirements.txt file S3 Object Version of the Amazon MWAA Environment.
", + "UpdateEnvironmentInput$PluginsS3ObjectVersion": "The Plugins.zip S3 Object Version to update of your Amazon MWAA environment.
", + "UpdateEnvironmentInput$RequirementsS3ObjectVersion": "The Requirements.txt S3 ObjectV ersion to update of your Amazon MWAA environment.
" + } + }, + "SecurityGroupId": { + "base": null, + "refs": { + "SecurityGroupList$member": null + } + }, + "SecurityGroupList": { + "base": null, + "refs": { + "NetworkConfiguration$SecurityGroupIds": "A JSON list of 1 or more security groups IDs by name, in the same VPC as the subnets.
", + "UpdateNetworkConfigurationInput$SecurityGroupIds": "Provide a JSON list of 1 or more security groups IDs by name, in the same VPC as the subnets.
" + } + }, + "StatisticSet": { + "base": "Internal only API.
", + "refs": { + "MetricDatum$StatisticValues": "Internal only API.
" + } + }, + "String": { + "base": null, + "refs": { + "AccessDeniedException$Message": null, + "Dimension$Name": "Internal only API.
", + "Dimension$Value": "Internal only API.
", + "InternalServerException$message": null, + "MetricDatum$MetricName": "Internal only API.
", + "ResourceNotFoundException$message": null, + "ValidationException$message": null + } + }, + "SubnetId": { + "base": null, + "refs": { + "SubnetList$member": null + } + }, + "SubnetList": { + "base": null, + "refs": { + "NetworkConfiguration$SubnetIds": "Provide a JSON list of 2 subnet IDs by name. These must be private subnets, in the same VPC, in two different availability zones.
" + } + }, + "SyntheticCreateCliTokenResponseToken": { + "base": null, + "refs": { + "CreateCliTokenResponse$CliToken": "Create an Airflow CLI login token response for the provided JWT token.
" + } + }, + "SyntheticCreateEnvironmentInputAirflowConfigurationOptions": { + "base": null, + "refs": { + "CreateEnvironmentInput$AirflowConfigurationOptions": "The Apache Airflow configuration setting you want to override in your environment. For more information, see Environment configuration.
" + } + }, + "SyntheticCreateWebLoginTokenResponseToken": { + "base": null, + "refs": { + "CreateWebLoginTokenResponse$WebToken": "Create an Airflow Web UI login token response for the provided JWT token.
" + } + }, + "SyntheticUpdateEnvironmentInputAirflowConfigurationOptions": { + "base": null, + "refs": { + "UpdateEnvironmentInput$AirflowConfigurationOptions": "The Airflow Configuration Options to update of your Amazon MWAA environment.
" + } + }, + "TagKey": { + "base": null, + "refs": { + "TagKeyList$member": null, + "TagMap$key": null + } + }, + "TagKeyList": { + "base": null, + "refs": { + "UntagResourceInput$tagKeys": "The tag resource key of the MWAA environments.
" + } + }, + "TagMap": { + "base": null, + "refs": { + "CreateEnvironmentInput$Tags": "The metadata tags you want to attach to your environment. For more information, see Tagging AWS resources.
", + "Environment$Tags": "The Tags of the Amazon MWAA Environment.
", + "ListTagsForResourceOutput$Tags": "The tags of the MWAA environments.
", + "TagResourceInput$Tags": "The tag resource tag of the MWAA environments.
" + } + }, + "TagResourceInput": { + "base": null, + "refs": { + } + }, + "TagResourceOutput": { + "base": null, + "refs": { + } + }, + "TagValue": { + "base": null, + "refs": { + "TagMap$value": null + } + }, + "Timestamp": { + "base": null, + "refs": { + "MetricDatum$Timestamp": "Internal only API.
" + } + }, + "Unit": { + "base": "Unit
", + "refs": { + "MetricDatum$Unit": null + } + }, + "UntagResourceInput": { + "base": null, + "refs": { + } + }, + "UntagResourceOutput": { + "base": null, + "refs": { + } + }, + "UpdateCreatedAt": { + "base": null, + "refs": { + "LastUpdate$CreatedAt": "Time that last update occurred.
" + } + }, + "UpdateEnvironmentInput": { + "base": null, + "refs": { + } + }, + "UpdateEnvironmentOutput": { + "base": null, + "refs": { + } + }, + "UpdateError": { + "base": "Error information of update, if applicable.
", + "refs": { + "LastUpdate$Error": "Error string of last update, if applicable.
" + } + }, + "UpdateNetworkConfigurationInput": { + "base": "Provide the security group and subnet IDs for the workers and scheduler.
", + "refs": { + "UpdateEnvironmentInput$NetworkConfiguration": "The Network Configuration to update of your Amazon MWAA environment.
" + } + }, + "UpdateStatus": { + "base": null, + "refs": { + "LastUpdate$Status": "Status of last update of SUCCESS, FAILED, CREATING, DELETING.
" + } + }, + "ValidationException": { + "base": "ValidationException: The provided input is not valid.
", + "refs": { + } + }, + "WebserverAccessMode": { + "base": null, + "refs": { + "CreateEnvironmentInput$WebserverAccessMode": "The networking access of your Apache Airflow web server. A public network allows your Airflow UI to be accessed over the Internet by users granted access in your IAM policy. A private network limits access of your Airflow UI to users within your VPC. For more information, see Creating the VPC network for a MWAA environment.
", + "Environment$WebserverAccessMode": "The Webserver Access Mode of the Amazon MWAA Environment (public or private only).
", + "UpdateEnvironmentInput$WebserverAccessMode": "The Webserver Access Mode to update of your Amazon MWAA environment.
" + } + }, + "WebserverUrl": { + "base": null, + "refs": { + "Environment$WebserverUrl": "The Webserver URL of the Amazon MWAA Environment.
" + } + }, + "WeeklyMaintenanceWindowStart": { + "base": null, + "refs": { + "CreateEnvironmentInput$WeeklyMaintenanceWindowStart": "The day and time you want MWAA to start weekly maintenance updates on your environment.
", + "Environment$WeeklyMaintenanceWindowStart": "The Weekly Maintenance Window Start of the Amazon MWAA Environment.
", + "UpdateEnvironmentInput$WeeklyMaintenanceWindowStart": "The Weekly Maintenance Window Start to update of your Amazon MWAA environment.
" + } + } + } +} diff --git a/models/apis/mwaa/2020-07-01/examples-1.json b/models/apis/mwaa/2020-07-01/examples-1.json new file mode 100644 index 00000000000..0ea7e3b0bbe --- /dev/null +++ b/models/apis/mwaa/2020-07-01/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/models/apis/mwaa/2020-07-01/paginators-1.json b/models/apis/mwaa/2020-07-01/paginators-1.json new file mode 100644 index 00000000000..5e218e4616b --- /dev/null +++ b/models/apis/mwaa/2020-07-01/paginators-1.json @@ -0,0 +1,10 @@ +{ + "pagination": { + "ListEnvironments": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Environments" + } + } +} diff --git a/models/apis/quicksight/2018-04-01/api-2.json b/models/apis/quicksight/2018-04-01/api-2.json index da087382ae0..13c03cc0492 100644 --- a/models/apis/quicksight/2018-04-01/api-2.json +++ b/models/apis/quicksight/2018-04-01/api-2.json @@ -926,6 +926,7 @@ {"shape":"IdentityTypeNotSupportedException"}, {"shape":"SessionLifetimeInMinutesInvalidException"}, {"shape":"UnsupportedUserEditionException"}, + {"shape":"UnsupportedPricingPlanException"}, {"shape":"InternalFailureException"} ] }, @@ -1800,6 +1801,12 @@ "AvailabilityStatus":{"shape":"DashboardBehavior"} } }, + "AdditionalDashboardIdList":{ + "type":"list", + "member":{"shape":"RestrictiveResourceId"}, + "max":20, + "min":1 + }, "AliasName":{ "type":"string", "max":2048, @@ -4431,6 +4438,14 @@ "ENTERPRISE" ] }, + "EmbeddingIdentityType":{ + "type":"string", + "enum":[ + "IAM", + "QUICKSIGHT", + "ANONYMOUS" + ] + }, "EmbeddingUrl":{ "type":"string", "sensitive":true @@ -4542,7 +4557,7 @@ "locationName":"DashboardId" }, "IdentityType":{ - "shape":"IdentityType", + "shape":"EmbeddingIdentityType", "location":"querystring", "locationName":"creds-type" }, @@ -4570,6 +4585,16 @@ "shape":"Arn", "location":"querystring", "locationName":"user-arn" + }, + "Namespace":{ + "shape":"Namespace", + "location":"querystring", + "locationName":"namespace" + }, + "AdditionalDashboardIds":{ + "shape":"AdditionalDashboardIdList", + "location":"querystring", + "locationName":"additional-dashboard-ids" } } }, @@ -6922,6 +6947,15 @@ "MeasureForeground":{"shape":"HexColor"} } }, + "UnsupportedPricingPlanException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":403}, + "exception":true + }, "UnsupportedUserEditionException":{ "type":"structure", "members":{ diff --git a/models/apis/quicksight/2018-04-01/docs-2.json b/models/apis/quicksight/2018-04-01/docs-2.json index 7435add014b..7a17b1212c9 100644 --- a/models/apis/quicksight/2018-04-01/docs-2.json +++ b/models/apis/quicksight/2018-04-01/docs-2.json @@ -148,6 +148,12 @@ "DashboardPublishOptions$AdHocFilteringOption": "Ad hoc (one-time) filtering option.
" } }, + "AdditionalDashboardIdList": { + "base": null, + "refs": { + "GetDashboardEmbedUrlRequest$AdditionalDashboardIds": "A list of one or more dashboard ids that you want to add to a session that includes anonymous authorizations. IdentityType
must be set to ANONYMOUS for this to work, because other identity types authenticate as QuickSight users. For example, if you set \"--dashboard-id dash_id1 --dashboard-id dash_id2 --dashboard-id dash_id3 --identity-type ANONYMOUS
\", the session can access all three dashboards.
Indicates if the dataset has column level permission configured.
", + "DataSetSummary$ColumnLevelPermissionRulesApplied": "Indicates if the dataset has column level permission configured.
", "GetDashboardEmbedUrlRequest$UndoRedoDisabled": "Remove the undo/redo button on the embedded dashboard. The default is FALSE, which enables the undo/redo button.
", "GetDashboardEmbedUrlRequest$ResetDisabled": "Remove the reset button on the embedded dashboard. The default is FALSE, which enables the reset button.
", "GetDashboardEmbedUrlRequest$StatePersistenceEnabled": "Adds persistence of state for the user session in an embedded dashboard. Persistence applies to the sheet and the parameter settings. These are control settings that the dashboard subscriber (QuickSight reader) chooses while viewing the dashboard. If this is set to TRUE
, the settings are the same when the subscriber reopens the same dashboard URL. The state is stored in QuickSight, not in a browser cookie. If this is set to FALSE, the state of the user session is not persisted. The default is FALSE
.
The edition of QuickSight that you're currently subscribed to: Enterprise edition or Standard edition.
" } }, + "EmbeddingIdentityType": { + "base": null, + "refs": { + "GetDashboardEmbedUrlRequest$IdentityType": "The authentication method that the user uses to sign in.
" + } + }, "EmbeddingUrl": { "base": null, "refs": { @@ -1647,12 +1659,12 @@ } }, "GetDashboardEmbedUrlRequest": { - "base": null, + "base": "Parameter input for the GetDashboardEmbedUrl
operation.
Output returned from the GetDashboardEmbedUrl
operation.
The authentication method that the user uses to sign in.
", "RegisterUserRequest$IdentityType": "Amazon QuickSight supports several ways of managing the identity of users. This parameter accepts two values:
IAM
: A user whose identity maps to an existing IAM user or role.
QUICKSIGHT
: A user whose identity is owned and managed internally by Amazon QuickSight.
The type of identity authentication used by the user.
" } @@ -2297,6 +2308,7 @@ "DescribeIAMPolicyAssignmentRequest$Namespace": "The namespace that contains the assignment.
", "DescribeNamespaceRequest$Namespace": "The namespace that you want to describe.
", "DescribeUserRequest$Namespace": "The namespace. Currently, you should set this to default
.
The QuickSight namespace that contains the dashboard IDs in this request. If you're not using a custom namespace, set this to \"default
\".
The namespace. Currently, you should set this to default
.
The namespace. Currently, you should set this to default
.
The namespace of the assignment.
", @@ -2687,6 +2699,7 @@ "RestrictiveResourceId": { "base": null, "refs": { + "AdditionalDashboardIdList$member": null, "Analysis$AnalysisId": "The ID of the analysis.
", "AnalysisSummary$AnalysisId": "The ID of the analysis. This ID displays in the URL.
", "CreateAnalysisRequest$AnalysisId": "The ID for the analysis that you're creating. This ID displays in the URL of the analysis.
", @@ -3207,6 +3220,8 @@ "TagResourceResponse$RequestId": "The AWS request ID for this operation.
", "ThrottlingException$Message": null, "ThrottlingException$RequestId": "The AWS request ID for this request.
", + "UnsupportedPricingPlanException$Message": null, + "UnsupportedPricingPlanException$RequestId": "The AWS request ID for this request.
", "UnsupportedUserEditionException$Message": null, "UnsupportedUserEditionException$RequestId": "The AWS request ID for this request.
", "UntagResourceResponse$RequestId": "The AWS request ID for this operation.
", @@ -3603,6 +3618,11 @@ "ThemeConfiguration$UIColorPalette": "Color properties that apply to the UI and to charts, excluding the colors that apply to data.
" } }, + "UnsupportedPricingPlanException": { + "base": "This error indicates that you are calling an embedding operation in Amazon QuickSight without the required pricing plan on your AWS account. Before you can use anonymous embedding, a QuickSight administrator needs to add capacity pricing to QuickSight. You can do this on the Manage QuickSight page.
After capacity pricing is added, you can enable anonymous embedding by using the GetDashboardEmbedUrl
API operation with the --identity-type ANONYMOUS
option.
This error indicates that you are calling an operation on an Amazon QuickSight subscription where the edition doesn't include support for that operation. Amazon QuickSight currently has Standard Edition and Enterprise Edition. Not every operation and capability is available in every edition.
", "refs": { diff --git a/models/apis/states/2016-11-23/api-2.json b/models/apis/states/2016-11-23/api-2.json index a60dbfa4ad9..5aa1176395d 100644 --- a/models/apis/states/2016-11-23/api-2.json +++ b/models/apis/states/2016-11-23/api-2.json @@ -268,6 +268,24 @@ ], "idempotent":true }, + "StartSyncExecution":{ + "name":"StartSyncExecution", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartSyncExecutionInput"}, + "output":{"shape":"StartSyncExecutionOutput"}, + "errors":[ + {"shape":"InvalidArn"}, + {"shape":"InvalidExecutionInput"}, + {"shape":"InvalidName"}, + {"shape":"StateMachineDoesNotExist"}, + {"shape":"StateMachineDeleting"}, + {"shape":"StateMachineTypeNotSupported"} + ], + "endpoint":{"hostPrefix":"sync-"} + }, "StopExecution":{ "name":"StopExecution", "http":{ @@ -423,10 +441,25 @@ "max":256, "min":1 }, + "BilledDuration":{ + "type":"long", + "min":0 + }, + "BilledMemoryUsed":{ + "type":"long", + "min":0 + }, + "BillingDetails":{ + "type":"structure", + "members":{ + "billedMemoryUsedInMB":{"shape":"BilledMemoryUsed"}, + "billedDurationInMilliseconds":{"shape":"BilledDuration"} + } + }, "CloudWatchEventsExecutionDataDetails":{ "type":"structure", "members":{ - "included":{"shape":"included"} + "included":{"shape":"includedDetails"} } }, "CloudWatchLogsLogGroup":{ @@ -1199,6 +1232,41 @@ "startDate":{"shape":"Timestamp"} } }, + "StartSyncExecutionInput":{ + "type":"structure", + "required":["stateMachineArn"], + "members":{ + "stateMachineArn":{"shape":"Arn"}, + "name":{"shape":"Name"}, + "input":{"shape":"SensitiveData"}, + "traceHeader":{"shape":"TraceHeader"} + } + }, + "StartSyncExecutionOutput":{ + "type":"structure", + "required":[ + "executionArn", + "startDate", + "stopDate", + "status" + ], + "members":{ + "executionArn":{"shape":"Arn"}, + "stateMachineArn":{"shape":"Arn"}, + "name":{"shape":"Name"}, + "startDate":{"shape":"Timestamp"}, + "stopDate":{"shape":"Timestamp"}, + "status":{"shape":"SyncExecutionStatus"}, + "error":{"shape":"SensitiveError"}, + "cause":{"shape":"SensitiveCause"}, + "input":{"shape":"SensitiveData"}, + "inputDetails":{"shape":"CloudWatchEventsExecutionDataDetails"}, + "output":{"shape":"SensitiveData"}, + "outputDetails":{"shape":"CloudWatchEventsExecutionDataDetails"}, + "traceHeader":{"shape":"TraceHeader"}, + "billingDetails":{"shape":"BillingDetails"} + } + }, "StateEnteredEventDetails":{ "type":"structure", "required":["name"], @@ -1301,6 +1369,14 @@ "stopDate":{"shape":"Timestamp"} } }, + "SyncExecutionStatus":{ + "type":"string", + "enum":[ + "SUCCEEDED", + "FAILED", + "TIMED_OUT" + ] + }, "Tag":{ "type":"structure", "members":{ @@ -1533,7 +1609,7 @@ "updateDate":{"shape":"Timestamp"} } }, - "included":{"type":"boolean"}, + "includedDetails":{"type":"boolean"}, "truncated":{"type":"boolean"} } } diff --git a/models/apis/states/2016-11-23/docs-2.json b/models/apis/states/2016-11-23/docs-2.json index 685016ee658..0d978bfacc8 100644 --- a/models/apis/states/2016-11-23/docs-2.json +++ b/models/apis/states/2016-11-23/docs-2.json @@ -20,6 +20,7 @@ "SendTaskHeartbeat": "Used by activity workers and task states using the callback pattern to report to Step Functions that the task represented by the specified taskToken
is still making progress. This action resets the Heartbeat
clock. The Heartbeat
threshold is specified in the state machine's Amazon States Language definition (HeartbeatSeconds
). This action does not in itself create an event in the execution history. However, if the task times out, the execution history contains an ActivityTimedOut
entry for activities, or a TaskTimedOut
entry for tasks using the job run or callback pattern.
The Timeout
of a task, defined in the state machine's Amazon States Language definition, is its maximum allowed duration, regardless of the number of SendTaskHeartbeat requests received. Use HeartbeatSeconds
to configure the timeout interval for heartbeats.
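For illustration, a minimal worker-side sketch of the heartbeat pattern described above, using the Go SDK's sfn client. The task token and interval are placeholders; the interval must stay below the state machine's HeartbeatSeconds.

```go
package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sfn"
)

// heartbeat resets the Heartbeat clock every interval until stop is closed.
// The task token is assumed to come from a prior GetActivityTask call.
func heartbeat(svc *sfn.SFN, taskToken string, interval time.Duration, stop <-chan struct{}) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-stop:
			return
		case <-ticker.C:
			// If these calls stop arriving before the heartbeat threshold
			// elapses, the execution history records a timed-out entry.
			if _, err := svc.SendTaskHeartbeat(&sfn.SendTaskHeartbeatInput{
				TaskToken: aws.String(taskToken),
			}); err != nil {
				return
			}
		}
	}
}

func main() {
	svc := sfn.New(session.Must(session.NewSession()))
	stop := make(chan struct{})
	go heartbeat(svc, "task-token-from-GetActivityTask", 30*time.Second, stop)
	// ... do the task's actual work here, then:
	close(stop)
}
```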
Used by activity workers and task states using the callback pattern to report that the task identified by the taskToken
completed successfully.
Starts a state machine execution.
StartExecution
is idempotent. If StartExecution
is called with the same name and input as a running execution, the call will succeed and return the same response as the original request. If the execution is closed or if the input is different, it will return a 400 ExecutionAlreadyExists
error. Names can be reused after 90 days.
Starts a Synchronous Express state machine execution.
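A minimal sketch of invoking the new StartSyncExecution API from the Go SDK; the state machine ARN and input below are placeholders, and the target must be an Express state machine.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sfn"
)

func main() {
	svc := sfn.New(session.Must(session.NewSession()))

	// The ARN is a placeholder for an Express state machine in your account.
	out, err := svc.StartSyncExecution(&sfn.StartSyncExecutionInput{
		StateMachineArn: aws.String("arn:aws:states:us-east-1:123456789012:stateMachine:example-express"),
		Input:           aws.String(`{"first_name": "test"}`),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Unlike StartExecution, the call blocks until the workflow finishes,
	// so the status and output are available on the same response.
	fmt.Println("status:", aws.StringValue(out.Status))
	if out.Output != nil {
		fmt.Println("output:", aws.StringValue(out.Output))
	}
}
```

Because the call is synchronous, the error and cause fields arrive on the same response when the status is FAILED or TIMED_OUT.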
", "StopExecution": "Stops an execution.
This API action is not supported by EXPRESS
state machines.
Add a tag to a Step Functions resource.
An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide, and Controlling Access Using IAM Tags.
Tags may only contain Unicode letters, digits, white space, or these symbols: _ . : / = + - @
.
Remove a tag from a Step Functions resource.
", @@ -103,7 +104,7 @@ "DescribeActivityInput$activityArn": "The Amazon Resource Name (ARN) of the activity to describe.
", "DescribeActivityOutput$activityArn": "The Amazon Resource Name (ARN) that identifies the activity.
", "DescribeExecutionInput$executionArn": "The Amazon Resource Name (ARN) of the execution to describe.
", - "DescribeExecutionOutput$executionArn": "The Amazon Resource Name (ARN) that id entifies the execution.
", + "DescribeExecutionOutput$executionArn": "The Amazon Resource Name (ARN) that identifies the execution.
", "DescribeExecutionOutput$stateMachineArn": "The Amazon Resource Name (ARN) of the executed stated machine.
", "DescribeStateMachineForExecutionInput$executionArn": "The Amazon Resource Name (ARN) of the execution you want state machine information for.
", "DescribeStateMachineForExecutionOutput$stateMachineArn": "The Amazon Resource Name (ARN) of the state machine associated with the execution.
", @@ -111,7 +112,7 @@ "DescribeStateMachineInput$stateMachineArn": "The Amazon Resource Name (ARN) of the state machine to describe.
", "DescribeStateMachineOutput$stateMachineArn": "The Amazon Resource Name (ARN) that identifies the state machine.
", "DescribeStateMachineOutput$roleArn": "The Amazon Resource Name (ARN) of the IAM role used when creating this state machine. (The IAM role maintains security by granting Step Functions access to AWS resources.)
", - "ExecutionListItem$executionArn": "The Amazon Resource Name (ARN) that id entifies the execution.
", + "ExecutionListItem$executionArn": "The Amazon Resource Name (ARN) that identifies the execution.
", "ExecutionListItem$stateMachineArn": "The Amazon Resource Name (ARN) of the executed state machine.
", "ExecutionStartedEventDetails$roleArn": "The Amazon Resource Name (ARN) of the IAM role used for executing AWS Lambda tasks.
", "GetActivityTaskInput$activityArn": "The Amazon Resource Name (ARN) of the activity to retrieve tasks from (assigned when you create the task using CreateActivity.)
", @@ -121,7 +122,10 @@ "ListTagsForResourceInput$resourceArn": "The Amazon Resource Name (ARN) for the Step Functions state machine or activity.
", "ResourceNotFound$resourceName": null, "StartExecutionInput$stateMachineArn": "The Amazon Resource Name (ARN) of the state machine to execute.
", - "StartExecutionOutput$executionArn": "The Amazon Resource Name (ARN) that id entifies the execution.
", + "StartExecutionOutput$executionArn": "The Amazon Resource Name (ARN) that identifies the execution.
", + "StartSyncExecutionInput$stateMachineArn": "The Amazon Resource Name (ARN) of the state machine to execute.
", + "StartSyncExecutionOutput$executionArn": "The Amazon Resource Name (ARN) that identifies the execution.
", + "StartSyncExecutionOutput$stateMachineArn": "The Amazon Resource Name (ARN) that identifies the state machine.
", "StateMachineListItem$stateMachineArn": "The Amazon Resource Name (ARN) that identifies the state machine.
", "StopExecutionInput$executionArn": "The Amazon Resource Name (ARN) of the execution to stop.
", "TagResourceInput$resourceArn": "The Amazon Resource Name (ARN) for the Step Functions state machine or activity.
", @@ -131,11 +135,31 @@ "UpdateStateMachineInput$roleArn": "The Amazon Resource Name (ARN) of the IAM role of the state machine.
" } }, + "BilledDuration": { + "base": null, + "refs": { + "BillingDetails$billedDurationInMilliseconds": "Billed duration of your workflow, in milliseconds.
" + } + }, + "BilledMemoryUsed": { + "base": null, + "refs": { + "BillingDetails$billedMemoryUsedInMB": "Billed memory consumption of your workflow, in MB.
" + } + }, + "BillingDetails": { + "base": "An object that describes workflow billing details.
", + "refs": { + "StartSyncExecutionOutput$billingDetails": "An object that describes workflow billing details, including billed duration and memory use.
" + } + }, "CloudWatchEventsExecutionDataDetails": { "base": "Provides details about execution input or output.
", "refs": { "DescribeExecutionOutput$inputDetails": null, - "DescribeExecutionOutput$outputDetails": null + "DescribeExecutionOutput$outputDetails": null, + "StartSyncExecutionOutput$inputDetails": null, + "StartSyncExecutionOutput$outputDetails": null } }, "CloudWatchLogsLogGroup": { @@ -601,6 +625,8 @@ "GetActivityTaskInput$workerName": "You can provide an arbitrary name in order to identify the worker that the task is assigned to. This name is used when it is logged in the execution history.
", "MapIterationEventDetails$name": "The name of the iteration’s parent Map state.
", "StartExecutionInput$name": "The name of the execution. This name must be unique for your AWS account, region, and state machine for 90 days. For more information, see Limits Related to State Machine Executions in the AWS Step Functions Developer Guide.
A name must not contain:
white space
brackets < > { } [ ]
wildcard characters ? *
special characters \" # % \\ ^ | ~ ` $ & , ; : /
control characters (U+0000-001F
, U+007F-009F
)
To enable logging with CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and _.
", + "StartSyncExecutionInput$name": "The name of the execution.
", + "StartSyncExecutionOutput$name": "The name of the execution.
", "StateEnteredEventDetails$name": "The name of the state.
", "StateExitedEventDetails$name": "The name of the state.
A name must not contain:
white space
brackets < > { } [ ]
wildcard characters ? *
special characters \" # % \\ ^ | ~ ` $ & , ; : /
control characters (U+0000-001F
, U+007F-009F
)
To enable logging with CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and _.
", "StateMachineListItem$name": "The name of the state machine.
A name must not contain:
white space
brackets < > { } [ ]
wildcard characters ? *
special characters \" # % \\ ^ | ~ ` $ & , ; : /
control characters (U+0000-001F
, U+007F-009F
)
To enable logging with CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and _.
", @@ -698,6 +724,7 @@ "LambdaFunctionStartFailedEventDetails$cause": "A more detailed explanation of the cause of the failure.
", "LambdaFunctionTimedOutEventDetails$cause": "A more detailed explanation of the cause of the timeout.
", "SendTaskFailureInput$cause": "A more detailed explanation of the cause of the failure.
", + "StartSyncExecutionOutput$cause": "A more detailed explanation of the cause of the failure.
", "StopExecutionInput$cause": "A more detailed explanation of the cause of the failure.
", "TaskFailedEventDetails$cause": "A more detailed explanation of the cause of the failure.
", "TaskStartFailedEventDetails$cause": "A more detailed explanation of the cause of the failure.
", @@ -718,6 +745,9 @@ "LambdaFunctionSucceededEventDetails$output": "The JSON data output by the lambda function. Length constraints apply to the payload size, and are expressed as bytes in UTF-8 encoding.
", "SendTaskSuccessInput$output": "The JSON output of the task. Length constraints apply to the payload size, and are expressed as bytes in UTF-8 encoding.
", "StartExecutionInput$input": "The string that contains the JSON input data for the execution, for example:
\"input\": \"{\\\"first_name\\\" : \\\"test\\\"}\"
If you don't include any JSON input data, you still must include the two braces, for example: \"input\": \"{}\"
Length constraints apply to the payload size, and are expressed as bytes in UTF-8 encoding.
", + "StartSyncExecutionInput$input": "The string that contains the JSON input data for the execution, for example:
\"input\": \"{\\\"first_name\\\" : \\\"test\\\"}\"
If you don't include any JSON input data, you still must include the two braces, for example: \"input\": \"{}\"
Length constraints apply to the payload size, and are expressed as bytes in UTF-8 encoding.
", + "StartSyncExecutionOutput$input": "The string that contains the JSON input data of the execution. Length constraints apply to the payload size, and are expressed as bytes in UTF-8 encoding.
", + "StartSyncExecutionOutput$output": "The JSON output data of the execution. Length constraints apply to the payload size, and are expressed as bytes in UTF-8 encoding.
This field is set only if the execution succeeds. If the execution fails, this field is null.
The string that contains the JSON input data for the state. Length constraints apply to the payload size, and are expressed as bytes in UTF-8 encoding.
", "StateExitedEventDetails$output": "The JSON output data of the state. Length constraints apply to the payload size, and are expressed as bytes in UTF-8 encoding.
", "TaskSubmittedEventDetails$output": "The response from a resource when a task has started. Length constraints apply to the payload size, and are expressed as bytes in UTF-8 encoding.
", @@ -744,6 +774,7 @@ "LambdaFunctionStartFailedEventDetails$error": "The error code of the failure.
", "LambdaFunctionTimedOutEventDetails$error": "The error code of the failure.
", "SendTaskFailureInput$error": "The error code of the failure.
", + "StartSyncExecutionOutput$error": "The error code of the failure.
", "StopExecutionInput$error": "The error code of the failure.
", "TaskFailedEventDetails$error": "The error code of the failure.
", "TaskStartFailedEventDetails$error": "The error code of the failure.
", @@ -761,6 +792,16 @@ "refs": { } }, + "StartSyncExecutionInput": { + "base": null, + "refs": { + } + }, + "StartSyncExecutionOutput": { + "base": null, + "refs": { + } + }, "StateEnteredEventDetails": { "base": "Contains details about a state entered during an execution.
", "refs": { @@ -834,6 +875,12 @@ "refs": { } }, + "SyncExecutionStatus": { + "base": null, + "refs": { + "StartSyncExecutionOutput$status": "The current status of the execution.
" + } + }, "Tag": { "base": "Tags are key-value pairs that can be associated with Step Functions state machines and activities.
An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide, and Controlling Access Using IAM Tags.
Tags may only contain Unicode letters, digits, white space, or these symbols: _ . : / = + - @
.
If the execution already ended, the date the execution stopped.
", "HistoryEvent$timestamp": "The date and time the event occurred.
", "StartExecutionOutput$startDate": "The date the execution is started.
", + "StartSyncExecutionOutput$startDate": "The date the execution is started.
", + "StartSyncExecutionOutput$stopDate": "If the execution has already ended, the date the execution stopped.
", "StateMachineListItem$creationDate": "The date the state machine is created.
", "StopExecutionOutput$stopDate": "The date the execution is stopped.
", "UpdateStateMachineOutput$updateDate": "The date and time the state machine was updated.
" @@ -983,8 +1032,10 @@ "TraceHeader": { "base": null, "refs": { - "DescribeExecutionOutput$traceHeader": "The AWS X-Ray trace header which was passed to the execution.
", - "StartExecutionInput$traceHeader": "Passes the AWS X-Ray trace header. The trace header can also be passed in the request payload.
" + "DescribeExecutionOutput$traceHeader": "The AWS X-Ray trace header that was passed to the execution.
", + "StartExecutionInput$traceHeader": "Passes the AWS X-Ray trace header. The trace header can also be passed in the request payload.
", + "StartSyncExecutionInput$traceHeader": "Passes the AWS X-Ray trace header. The trace header can also be passed in the request payload.
", + "StartSyncExecutionOutput$traceHeader": "The AWS X-Ray trace header that was passed to the execution.
" } }, "TracingConfiguration": { @@ -1023,7 +1074,7 @@ "refs": { } }, - "included": { + "includedDetails": { "base": null, "refs": { "CloudWatchEventsExecutionDataDetails$included": "Indicates whether input or output was included in the response. Always true
for API calls.
Creates a new Timestream database. If the KMS key is not specified, the database will be encrypted with a Timestream managed KMS key located in your account. Refer to AWS managed KMS keys for more info. Service quotas apply. For more information, see Access Management in the Timestream Developer Guide.
", "CreateTable": "The CreateTable operation adds a new table to an existing database in your account. In an AWS account, table names must be at least unique within each Region if they are in the same database. You may have identical table names in the same Region if the tables are in seperate databases. While creating the table, you must specify the table name, database name, and the retention properties. Service quotas apply. For more information, see Access Management in the Timestream Developer Guide.
", - "DeleteDatabase": "Deletes a given Timestream database. This is an irreversible operation. After a database is deleted, the time series data from its tables cannot be recovered.
All tables in the database must be deleted first, or a ValidationException error will be thrown.
", - "DeleteTable": "Deletes a given Timestream table. This is an irreversible operation. After a Timestream database table is deleted, the time series data stored in the table cannot be recovered.
", + "DeleteDatabase": "Deletes a given Timestream database. This is an irreversible operation. After a database is deleted, the time series data from its tables cannot be recovered.
All tables in the database must be deleted first, or a ValidationException error will be thrown.
Due to the nature of distributed retries, the operation can return either success or a ResourceNotFoundException. Clients should consider them equivalent.
", + "DeleteTable": "Deletes a given Timestream table. This is an irreversible operation. After a Timestream database table is deleted, the time series data stored in the table cannot be recovered.
Due to the nature of distributed retries, the operation can return either success or a ResourceNotFoundException. Clients should consider them equivalent.
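Given that equivalence, a caller can treat ResourceNotFoundException as success when deleting. A sketch with the Go SDK, using placeholder names:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/timestreamwrite"
)

func main() {
	svc := timestreamwrite.New(session.Must(session.NewSession()))

	_, err := svc.DeleteTable(&timestreamwrite.DeleteTableInput{
		DatabaseName: aws.String("exampleDB"), // placeholder names
		TableName:    aws.String("exampleTable"),
	})
	if aerr, ok := err.(awserr.Error); ok &&
		aerr.Code() == timestreamwrite.ErrCodeResourceNotFoundException {
		// A retried delete may race with itself; per the documentation
		// above, treat "not found" as success.
		err = nil
	}
	if err != nil {
		log.Fatal(err)
	}
}
```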
", "DescribeDatabase": "Returns information about the database, including the database name, time that the database was created, and the total number of tables found within the database. Service quotas apply. For more information, see Access Management in the Timestream Developer Guide.
", "DescribeEndpoints": "DescribeEndpoints returns a list of available endpoints to make Timestream API calls against. This API is available through both Write and Query.
Because Timestream’s SDKs are designed to transparently work with the service’s architecture, including the management and mapping of the service endpoints, it is not recommended that you use this API unless:
Your application uses a programming language that does not yet have SDK support
You require better control over the client-side implementation
For detailed information on how to use DescribeEndpoints, see The Endpoint Discovery Pattern and REST APIs.
", "DescribeTable": "Returns information about the table, including the table name, database name, retention duration of the memory store and the magnetic store. Service quotas apply. For more information, see Access Management in the Timestream Developer Guide.
", @@ -158,7 +158,7 @@ "ConflictException$Message": null, "InternalServerException$Message": null, "InvalidEndpointException$Message": null, - "RejectedRecord$Reason": "The reason why a record was not successfully inserted into Timestream. Possible causes of failure include:
Records with duplicate data where there are multiple records with the same dimensions, timestamps, and measure names but different measure values.
Records with timestamps that lie outside the retention duration of the memory store
Records with dimensions or measures that exceed the Timestream defined limits.
For more information, see Access Management in the Timestream Developer Guide.
", + "RejectedRecord$Reason": "The reason why a record was not successfully inserted into Timestream. Possible causes of failure include:
Records with duplicate data where there are multiple records with the same dimensions, timestamps, and measure names but different measure values.
Records with timestamps that lie outside the retention duration of the memory store
When the retention window is updated, you will receive a RejectedRecords
exception if you immediately try to ingest data within the new window. To avoid a RejectedRecords
exception, wait for the duration of the new window before ingesting new data. For further information, see Best Practices for Configuring Timestream and the explanation of how storage works in Timestream.
Records with dimensions or measures that exceed the Timestream defined limits.
For more information, see Access Management in the Timestream Developer Guide.
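When WriteRecords fails for one of these reasons, the Go SDK returns a typed RejectedRecordsException whose RejectedRecords carry the Reason and RecordIndex described here. A small sketch:

```go
package example

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/timestreamwrite"
)

// logRejections prints the index and reason for each rejected record after a
// failed WriteRecords call.
func logRejections(err error) {
	rre, ok := err.(*timestreamwrite.RejectedRecordsException)
	if !ok {
		return
	}
	for _, r := range rre.RejectedRecords {
		log.Printf("record %d rejected: %s",
			aws.Int64Value(r.RecordIndex), aws.StringValue(r.Reason))
	}
}
```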
", "RejectedRecordsException$Message": null, "ResourceNotFoundException$Message": null, "ServiceQuotaExceededException$Message": null, @@ -251,6 +251,13 @@ "RejectedRecord$RecordIndex": "The index of the record in the input request for WriteRecords. Indexes begin with 0.
" } }, + "RecordVersion": { + "base": null, + "refs": { + "Record$Version": "64-bit attribute used for record updates. Write requests for duplicate data with a higher version number will update the existing measure value and version. In cases where the measure value is the same, Version
will still be updated. The default value is 1.
The existing version of the record. This value is populated in scenarios where an identical record exists with a higher version than the version in the write request.
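A sketch of an upsert-style write that sets the new Version member via the Go SDK; the database, table, dimension, and measure values are placeholders.

```go
package main

import (
	"log"
	"strconv"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/timestreamwrite"
)

func main() {
	svc := timestreamwrite.New(session.Must(session.NewSession()))

	now := strconv.FormatInt(time.Now().UnixNano()/int64(time.Millisecond), 10)
	_, err := svc.WriteRecords(&timestreamwrite.WriteRecordsInput{
		DatabaseName: aws.String("exampleDB"), // placeholder names
		TableName:    aws.String("exampleTable"),
		Records: []*timestreamwrite.Record{{
			Dimensions: []*timestreamwrite.Dimension{
				{Name: aws.String("host"), Value: aws.String("host-1")},
			},
			MeasureName:      aws.String("cpu_utilization"),
			MeasureValue:     aws.String("42.5"),
			MeasureValueType: aws.String("DOUBLE"),
			Time:             aws.String(now),
			TimeUnit:         aws.String("MILLISECONDS"),
			// Re-sending the same record with a higher Version updates the
			// stored measure value instead of being rejected as a duplicate.
			Version: aws.Int64(2),
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```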
" + } + }, "Records": { "base": null, "refs": { @@ -340,9 +347,9 @@ "StringValue256": { "base": null, "refs": { - "Dimension$Name": "Dimension represents the meta data attributes of the time series. For example, the name and availability zone of an EC2 instance or the name of the manufacturer of a wind turbine are dimensions. Dimension names can only contain alphanumeric characters and underscores. Dimension names cannot end with an underscore.
", + "Dimension$Name": "Dimension represents the meta data attributes of the time series. For example, the name and availability zone of an EC2 instance or the name of the manufacturer of a wind turbine are dimensions.
For constraints on Dimension names, see Naming Constraints.
", "Record$MeasureName": "Measure represents the data attribute of the time series. For example, the CPU utilization of an EC2 instance or the RPM of a wind turbine are measures.
", - "Record$Time": "Contains the time at which the measure value for the data point was collected.
" + "Record$Time": " Contains the time at which the measure value for the data point was collected. The time value plus the unit provides the time elapsed since the epoch. For example, if the time value is 12345
and the unit is ms
, then 12345 ms
have elapsed since the epoch.
Operations and objects for transcribing streaming speech to text.
", "operations": { + "StartMedicalStreamTranscription": "Starts a bidirectional HTTP/2 stream where audio is streamed to Amazon Transcribe Medical and the transcription results are streamed to your application.
", "StartStreamTranscription": "Starts a bidirectional HTTP2 stream where audio is streamed to Amazon Transcribe and the transcription results are streamed to your application.
The following are encoded as HTTP2 headers:
x-amzn-transcribe-language-code
x-amzn-transcribe-media-encoding
x-amzn-transcribe-sample-rate
x-amzn-transcribe-session-id
Represents the audio stream from your application to Amazon Transcribe.
", "refs": { + "StartMedicalStreamTranscriptionRequest$AudioStream": null, "StartStreamTranscriptionRequest$AudioStream": "PCM-encoded stream of audio blobs. The audio stream is encoded as an HTTP2 data frame.
" } }, "BadRequestException": { - "base": "One or more arguments to the StartStreamTranscription
operation was invalid. For example, MediaEncoding
was not set to pcm
or LanguageCode
was not set to a valid code. Check the parameters and try your request again.
One or more arguments to the StartStreamTranscription
or StartMedicalStreamTranscription
operation was invalid. For example, MediaEncoding
was not set to a valid encoding, or LanguageCode
was not set to a valid code. Check the parameters and try your request again.
A client error occurred when the stream was created. Check the parameters of the request and try your request again.
" } }, @@ -45,16 +48,28 @@ "base": null, "refs": { "Item$VocabularyFilterMatch": "Indicates whether a word in the item matches a word in the vocabulary filter you've chosen for your real-time stream. If true
then a word in the item matches your vocabulary filter.
Amazon Transcribe Medical divides the incoming audio stream into segments at natural points in the audio. Transcription results are returned based on these segments.
The IsPartial
field is true
to indicate that Amazon Transcribe Medical has additional transcription data to send. The IsPartial
field is false
to indicate that this is the last transcription result for the segment.
Amazon Transcribe divides the incoming audio stream into segments at natural points in the audio. Transcription results are returned based on these segments.
The IsPartial
field is true
to indicate that Amazon Transcribe has additional transcription data to send, false
to indicate that this is the last transcription result for the segment.
When true
, enables speaker identification in your real-time stream.
When true
, instructs Amazon Transcribe Medical to process each audio channel separately and then merge the transcription output of each channel into a single transcription.
Amazon Transcribe Medical also produces a transcription of each item. An item includes the start time, end time, and any alternative transcriptions.
You can't set both ShowSpeakerLabel
and EnableChannelIdentification
in the same request. If you set both, your request returns a BadRequestException
.
Shows whether speaker identification was enabled in the stream.
", + "StartMedicalStreamTranscriptionResponse$EnableChannelIdentification": "Shows whether channel identification has been enabled in the stream.
", "StartStreamTranscriptionRequest$ShowSpeakerLabel": "When true
, enables speaker identification in your real-time stream.
When true
, instructs Amazon Transcribe to process each audio channel separately and then merge the transcription output of each channel into a single transcription.
Amazon Transcribe also produces a transcription of each item. An item includes the start time, end time, and any alternative transcriptions.
You can't set both ShowSpeakerLabel
and EnableChannelIdentification
in the same request. If you set both, your request returns a BadRequestException
.
Shows whether speaker identification was enabled in the stream.
", "StartStreamTranscriptionResponse$EnableChannelIdentification": "Shows whether channel identification has been enabled in the stream.
" } }, + "Confidence": { + "base": null, + "refs": { + "MedicalItem$Confidence": "A value between 0 and 1 for an item that is a confidence score that Amazon Transcribe Medical assigns to each word that it transcribes.
" + } + }, "ConflictException": { "base": "A new stream started with the same session ID. The current stream has been terminated.
", "refs": { + "MedicalTranscriptResultStream$ConflictException": null, "TranscriptResultStream$ConflictException": "A new stream started with the same session ID. The current stream has been terminated.
" } }, @@ -63,13 +78,18 @@ "refs": { "Item$StartTime": "The offset from the beginning of the audio stream to the beginning of the audio that resulted in the item.
", "Item$EndTime": "The offset from the beginning of the audio stream to the end of the audio that resulted in the item.
", + "MedicalItem$StartTime": "The number of seconds into an audio stream that indicates the creation time of an item.
", + "MedicalItem$EndTime": "The number of seconds into an audio stream that indicates the creation time of an item.
", + "MedicalResult$StartTime": "The time, in seconds, from the beginning of the audio stream to the beginning of the result.
", + "MedicalResult$EndTime": "The time, in seconds, from the beginning of the audio stream to the end of the result.
", "Result$StartTime": "The offset in seconds from the beginning of the audio stream to the beginning of the result.
", "Result$EndTime": "The offset in seconds from the beginning of the audio stream to the end of the result.
" } }, "InternalFailureException": { - "base": "A problem occurred while processing the audio. Amazon Transcribe terminated processing. Try your request again.
", + "base": "A problem occurred while processing the audio. Amazon Transcribe or Amazon Transcribe Medical terminated processing. Try your request again.
", "refs": { + "MedicalTranscriptResultStream$InternalFailureException": null, "TranscriptResultStream$InternalFailureException": "A problem occurred while processing the audio. Amazon Transcribe terminated processing.
" } }, @@ -88,12 +108,15 @@ "ItemType": { "base": null, "refs": { - "Item$Type": "The type of the item. PRONUNCIATION
indicates that the item is a word that was recognized in the input audio. PUNCTUATION
indicates that the item was interpreted as a pause in the input audio.
The type of the item. PRONUNCIATION
indicates that the item is a word that was recognized in the input audio. PUNCTUATION
indicates that the item was interpreted as a pause in the input audio.
The type of the item. PRONUNCIATION
indicates that the item is a word that was recognized in the input audio. PUNCTUATION
indicates that the item was interpreted as a pause in the input audio, such as a period to indicate the end of a sentence.
Indicates the source language used in the input audio stream. For Amazon Transcribe Medical, this is US English (en-US).
", + "StartMedicalStreamTranscriptionResponse$LanguageCode": "The language code for the response transcript. For Amazon Transcribe Medical, this is US English (en-US).
", "StartStreamTranscriptionRequest$LanguageCode": "Indicates the source language used in the input audio stream.
", "StartStreamTranscriptionResponse$LanguageCode": "The language code for the input audio stream.
" } @@ -101,26 +124,87 @@ "LimitExceededException": { "base": "You have exceeded the maximum number of concurrent transcription streams, are starting transcription streams too quickly, or the maximum audio length of 4 hours. Wait until a stream has finished processing, or break your audio stream into smaller chunks and try your request again.
", "refs": { + "MedicalTranscriptResultStream$LimitExceededException": null, "TranscriptResultStream$LimitExceededException": "Your client has exceeded one of the Amazon Transcribe limits, typically the limit on audio length. Break your audio stream into smaller chunks and try your request again.
" } }, "MediaEncoding": { "base": null, "refs": { - "StartStreamTranscriptionRequest$MediaEncoding": "The encoding used for the input audio. pcm
is the only valid value.
The encoding used for the input audio.
", + "StartMedicalStreamTranscriptionResponse$MediaEncoding": "The encoding used for the input audio stream.
", + "StartStreamTranscriptionRequest$MediaEncoding": "The encoding used for the input audio.
", "StartStreamTranscriptionResponse$MediaEncoding": "The encoding used for the input audio stream.
" } }, "MediaSampleRateHertz": { "base": null, "refs": { + "StartMedicalStreamTranscriptionRequest$MediaSampleRateHertz": "The sample rate of the input audio in Hertz. Sample rates of 16000 Hz or higher are accepted.
", + "StartMedicalStreamTranscriptionResponse$MediaSampleRateHertz": "The sample rate of the input audio in Hertz. Valid value: 16000 Hz.
", "StartStreamTranscriptionRequest$MediaSampleRateHertz": "The sample rate, in Hertz, of the input audio. We suggest that you use 8000 Hz for low quality audio and 16000 Hz for high quality audio.
", "StartStreamTranscriptionResponse$MediaSampleRateHertz": "The sample rate for the input audio stream. Use 8000 Hz for low quality audio and 16000 Hz for high quality audio.
" } }, + "MedicalAlternative": { + "base": "A list of possible transcriptions for the audio.
", + "refs": { + "MedicalAlternativeList$member": null + } + }, + "MedicalAlternativeList": { + "base": null, + "refs": { + "MedicalResult$Alternatives": "A list of possible transcriptions of the audio. Each alternative typically contains one Item
that contains the result of the transcription.
A word or punctuation that is transcribed from the input audio.
", + "refs": { + "MedicalItemList$member": null + } + }, + "MedicalItemList": { + "base": null, + "refs": { + "MedicalAlternative$Items": "A list of objects that contains words and punctuation marks that represents one or more interpretations of the input audio.
" + } + }, + "MedicalResult": { + "base": "The results of transcribing a portion of the input audio stream.
", + "refs": { + "MedicalResultList$member": null + } + }, + "MedicalResultList": { + "base": null, + "refs": { + "MedicalTranscript$Results": "MedicalResult objects that contain the results of transcribing a portion of the input audio stream. The array can be empty.
" + } + }, + "MedicalTranscript": { + "base": "The medical transcript in a MedicalTranscriptEvent.
", + "refs": { + "MedicalTranscriptEvent$Transcript": "The transcription of the audio stream. The transcription is composed of all of the items in the results list.
" + } + }, + "MedicalTranscriptEvent": { + "base": "Represents a set of transcription results from the server to the client. It contains one or more segments of the transcription.
", + "refs": { + "MedicalTranscriptResultStream$TranscriptEvent": "A portion of the transcription of the audio stream. Events are sent periodically from Amazon Transcribe Medical to your application. The event can be a partial transcription of a section of the audio stream, or it can be the entire transcription of that portion of the audio stream.
" + } + }, + "MedicalTranscriptResultStream": { + "base": "Represents the transcription result stream from Amazon Transcribe Medical to your application.
", + "refs": { + "StartMedicalStreamTranscriptionResponse$TranscriptResultStream": "Represents the stream of transcription events from Amazon Transcribe Medical to your application.
" + } + }, "NumberOfChannels": { "base": null, "refs": { + "StartMedicalStreamTranscriptionRequest$NumberOfChannels": "The number of channels that are in your audio stream.
", + "StartMedicalStreamTranscriptionResponse$NumberOfChannels": "The number of channels identified in the stream.
", "StartStreamTranscriptionRequest$NumberOfChannels": "The number of channels that are in your audio stream.
", "StartStreamTranscriptionResponse$NumberOfChannels": "The number of channels identified in the stream.
" } @@ -128,6 +212,7 @@ "RequestId": { "base": null, "refs": { + "StartMedicalStreamTranscriptionResponse$RequestId": "An identifier for the streaming transcription.
", "StartStreamTranscriptionResponse$RequestId": "An identifier for the streaming transcription.
" } }, @@ -146,16 +231,36 @@ "ServiceUnavailableException": { "base": "Service is currently unavailable. Try your request later.
", "refs": { + "MedicalTranscriptResultStream$ServiceUnavailableException": null, "TranscriptResultStream$ServiceUnavailableException": "Service is currently unavailable. Try your request later.
" } }, "SessionId": { "base": null, "refs": { + "StartMedicalStreamTranscriptionRequest$SessionId": "Optional. An identifier for the transcription session. If you don't provide a session ID, Amazon Transcribe generates one for you and returns it in the response.
", + "StartMedicalStreamTranscriptionResponse$SessionId": "Optional. An identifier for the transcription session. If you don't provide a session ID, Amazon Transcribe generates one for you and returns it in the response.
", "StartStreamTranscriptionRequest$SessionId": "A identifier for the transcription session. Use this parameter when you want to retry a session. If you don't provide a session ID, Amazon Transcribe will generate one for you and return it in the response.
", "StartStreamTranscriptionResponse$SessionId": "An identifier for a specific transcription session.
" } }, + "Specialty": { + "base": null, + "refs": { + "StartMedicalStreamTranscriptionRequest$Specialty": "The medical specialty of the clinician or provider.
", + "StartMedicalStreamTranscriptionResponse$Specialty": "The specialty in the medical domain.
" + } + }, + "StartMedicalStreamTranscriptionRequest": { + "base": null, + "refs": { + } + }, + "StartMedicalStreamTranscriptionResponse": { + "base": null, + "refs": { + } + }, "StartStreamTranscriptionRequest": { "base": null, "refs": { @@ -176,6 +281,11 @@ "Item$Content": "The word or punctuation that was recognized in the input audio.
", "Item$Speaker": "If speaker identification is enabled, shows the speakers identified in the real-time stream.
", "LimitExceededException$Message": null, + "MedicalAlternative$Transcript": "The text that was transcribed from the audio.
", + "MedicalItem$Content": "The word or punctuation mark that was recognized in the input audio.
", + "MedicalItem$Speaker": "If speaker identification is enabled, shows the integer values that correspond to the different speakers identified in the stream. For example, if the value of Speaker
in the stream is either a 0
or a 1
, that indicates that Amazon Transcribe Medical has identified two speakers in the stream. The value of 0
corresponds to one speaker and the value of 1
corresponds to the other speaker.
A unique identifier for the result.
", + "MedicalResult$ChannelId": "When channel identification is enabled, Amazon Transcribe Medical transcribes the speech from each audio channel separately.
You can use ChannelId
to retrieve the transcription results for a single channel in your audio stream.
A unique identifier for the result.
", "Result$ChannelId": "When channel identification is enabled, Amazon Transcribe transcribes the speech from each audio channel separately.
You can use ChannelId
to retrieve the transcription results for a single channel in your audio stream.
Represents the stream of transcription events from Amazon Transcribe to your application.
" } }, + "Type": { + "base": null, + "refs": { + "StartMedicalStreamTranscriptionRequest$Type": "The type of input audio. Choose DICTATION
for a provider dictating patient notes. Choose CONVERSATION
for a dialogue between a patient and one or more medical professionanls.
The type of audio that was transcribed.
" + } + }, "VocabularyFilterMethod": { "base": null, "refs": { @@ -216,6 +333,8 @@ "VocabularyName": { "base": null, "refs": { + "StartMedicalStreamTranscriptionRequest$VocabularyName": "The name of the medical custom vocabulary to use when processing the real-time stream.
", + "StartMedicalStreamTranscriptionResponse$VocabularyName": "The name of the vocabulary used when processing the stream.
", "StartStreamTranscriptionRequest$VocabularyName": "The name of the vocabulary to use when processing the transcription job.
", "StartStreamTranscriptionResponse$VocabularyName": "The name of the vocabulary used when processing the stream.
" } diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index 92cb2ded7bb..973270b68c5 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -5462,6 +5462,7 @@ }, "snowball" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -7046,6 +7047,12 @@ "cn-northwest-1" : { } } }, + "ram" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "rds" : { "endpoints" : { "cn-north-1" : { }, diff --git a/service/appflow/api.go b/service/appflow/api.go index 6d5bf186bc1..502925eebb0 100644 --- a/service/appflow/api.go +++ b/service/appflow/api.go @@ -2425,6 +2425,9 @@ type ConnectorMetadata struct { // The connector metadata specific to Trend Micro. Trendmicro *TrendmicroMetadata `type:"structure"` + // The connector metadata specific to Upsolver. + Upsolver *UpsolverMetadata `type:"structure"` + // The connector metadata specific to Veeva. Veeva *VeevaMetadata `type:"structure"` @@ -2532,6 +2535,12 @@ func (s *ConnectorMetadata) SetTrendmicro(v *TrendmicroMetadata) *ConnectorMetad return s } +// SetUpsolver sets the Upsolver field's value. +func (s *ConnectorMetadata) SetUpsolver(v *UpsolverMetadata) *ConnectorMetadata { + s.Upsolver = v + return s +} + // SetVeeva sets the Veeva field's value. func (s *ConnectorMetadata) SetVeeva(v *VeevaMetadata) *ConnectorMetadata { s.Veeva = v @@ -4535,6 +4544,9 @@ type DestinationConnectorProperties struct { // The properties required to query Snowflake. Snowflake *SnowflakeDestinationProperties `type:"structure"` + + // The properties required to query Upsolver. + Upsolver *UpsolverDestinationProperties `type:"structure"` } // String returns the string representation @@ -4575,6 +4587,11 @@ func (s *DestinationConnectorProperties) Validate() error { invalidParams.AddNested("Snowflake", err.(request.ErrInvalidParams)) } } + if s.Upsolver != nil { + if err := s.Upsolver.Validate(); err != nil { + invalidParams.AddNested("Upsolver", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -4612,6 +4629,12 @@ func (s *DestinationConnectorProperties) SetSnowflake(v *SnowflakeDestinationPro return s } +// SetUpsolver sets the Upsolver field's value. +func (s *DestinationConnectorProperties) SetUpsolver(v *UpsolverDestinationProperties) *DestinationConnectorProperties { + s.Upsolver = v + return s +} + // The properties that can be applied to a field when connector is being used // as a destination. type DestinationFieldProperties struct { @@ -6959,7 +6982,8 @@ type ScheduledTriggerProperties struct { // Specifies the scheduled end time for a schedule-triggered flow. ScheduleEndTime *time.Time `locationName:"scheduleEndTime" type:"timestamp"` - // The scheduling expression that determines when and how often the rule runs. + // The scheduling expression that determines the rate at which the schedule + // will run, for example rate(5minutes). // // ScheduleExpression is a required field ScheduleExpression *string `locationName:"scheduleExpression" type:"string" required:"true"` @@ -9028,6 +9052,156 @@ func (s *UpdateFlowOutput) SetFlowStatus(v string) *UpdateFlowOutput { return s } +// The properties that are applied when Upsolver is used as a destination. +type UpsolverDestinationProperties struct { + _ struct{} `type:"structure"` + + // The Upsolver Amazon S3 bucket name in which Amazon AppFlow places the transferred + // data. 
+ // + // BucketName is a required field + BucketName *string `locationName:"bucketName" min:"16" type:"string" required:"true"` + + // The object key for the destination Upsolver Amazon S3 bucket in which Amazon + // AppFlow places the files. + BucketPrefix *string `locationName:"bucketPrefix" type:"string"` + + // The configuration that determines how data is formatted when Upsolver is + // used as the flow destination. + // + // S3OutputFormatConfig is a required field + S3OutputFormatConfig *UpsolverS3OutputFormatConfig `locationName:"s3OutputFormatConfig" type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpsolverDestinationProperties) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpsolverDestinationProperties) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpsolverDestinationProperties) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpsolverDestinationProperties"} + if s.BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("BucketName")) + } + if s.BucketName != nil && len(*s.BucketName) < 16 { + invalidParams.Add(request.NewErrParamMinLen("BucketName", 16)) + } + if s.S3OutputFormatConfig == nil { + invalidParams.Add(request.NewErrParamRequired("S3OutputFormatConfig")) + } + if s.S3OutputFormatConfig != nil { + if err := s.S3OutputFormatConfig.Validate(); err != nil { + invalidParams.AddNested("S3OutputFormatConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucketName sets the BucketName field's value. +func (s *UpsolverDestinationProperties) SetBucketName(v string) *UpsolverDestinationProperties { + s.BucketName = &v + return s +} + +// SetBucketPrefix sets the BucketPrefix field's value. +func (s *UpsolverDestinationProperties) SetBucketPrefix(v string) *UpsolverDestinationProperties { + s.BucketPrefix = &v + return s +} + +// SetS3OutputFormatConfig sets the S3OutputFormatConfig field's value. +func (s *UpsolverDestinationProperties) SetS3OutputFormatConfig(v *UpsolverS3OutputFormatConfig) *UpsolverDestinationProperties { + s.S3OutputFormatConfig = v + return s +} + +// The connector metadata specific to Upsolver. +type UpsolverMetadata struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpsolverMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpsolverMetadata) GoString() string { + return s.String() +} + +// The configuration that determines how Amazon AppFlow formats the flow output +// data when Upsolver is used as the destination. +type UpsolverS3OutputFormatConfig struct { + _ struct{} `type:"structure"` + + // The aggregation settings that you can use to customize the output format + // of your flow data. + AggregationConfig *AggregationConfig `locationName:"aggregationConfig" type:"structure"` + + // Indicates the file type that Amazon AppFlow places in the Upsolver Amazon + // S3 bucket. + FileType *string `locationName:"fileType" type:"string" enum:"FileType"` + + // Determines the prefix that Amazon AppFlow applies to the destination folder + // name. You can name your destination folders according to the flow frequency + // and date. 
+ // + // PrefixConfig is a required field + PrefixConfig *PrefixConfig `locationName:"prefixConfig" type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpsolverS3OutputFormatConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpsolverS3OutputFormatConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpsolverS3OutputFormatConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpsolverS3OutputFormatConfig"} + if s.PrefixConfig == nil { + invalidParams.Add(request.NewErrParamRequired("PrefixConfig")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAggregationConfig sets the AggregationConfig field's value. +func (s *UpsolverS3OutputFormatConfig) SetAggregationConfig(v *AggregationConfig) *UpsolverS3OutputFormatConfig { + s.AggregationConfig = v + return s +} + +// SetFileType sets the FileType field's value. +func (s *UpsolverS3OutputFormatConfig) SetFileType(v string) *UpsolverS3OutputFormatConfig { + s.FileType = &v + return s +} + +// SetPrefixConfig sets the PrefixConfig field's value. +func (s *UpsolverS3OutputFormatConfig) SetPrefixConfig(v *PrefixConfig) *UpsolverS3OutputFormatConfig { + s.PrefixConfig = v + return s +} + // The request has invalid or missing parameters. type ValidationException struct { _ struct{} `type:"structure"` @@ -9500,6 +9674,9 @@ const ( // ConnectorTypeEventBridge is a ConnectorType enum value ConnectorTypeEventBridge = "EventBridge" + + // ConnectorTypeUpsolver is a ConnectorType enum value + ConnectorTypeUpsolver = "Upsolver" ) // ConnectorType_Values returns all elements of the ConnectorType enum @@ -9522,6 +9699,7 @@ func ConnectorType_Values() []string { ConnectorTypeAmplitude, ConnectorTypeVeeva, ConnectorTypeEventBridge, + ConnectorTypeUpsolver, } } diff --git a/service/batch/api.go b/service/batch/api.go index 9de96e5dbfe..3950091b1b7 100644 --- a/service/batch/api.go +++ b/service/batch/api.go @@ -2507,13 +2507,20 @@ type ComputeResource struct { // The desired number of Amazon EC2 vCPUS in the compute environment. DesiredvCpus *int64 `locationName:"desiredvCpus" type:"integer"` + // Provides additional details used to selecting the AMI to use for instances + // in a compute environment. + Ec2Configuration []*Ec2Configuration `locationName:"ec2Configuration" type:"list"` + // The Amazon EC2 key pair that is used for instances launched in the compute // environment. Ec2KeyPair *string `locationName:"ec2KeyPair" type:"string"` // The Amazon Machine Image (AMI) ID used for instances launched in the compute - // environment. - ImageId *string `locationName:"imageId" type:"string"` + // environment. This parameter is overridden by the imageIdOverride member of + // the Ec2Configuration structure. + // + // Deprecated: This field is deprecated, use ec2Configuration[].imageIdOverride instead. + ImageId *string `locationName:"imageId" deprecated:"true" type:"string"` // The Amazon ECS instance profile applied to Amazon EC2 instances in a compute // environment. 
You can specify the short name or full Amazon Resource Name @@ -2629,6 +2636,16 @@ func (s *ComputeResource) Validate() error { if s.Type == nil { invalidParams.Add(request.NewErrParamRequired("Type")) } + if s.Ec2Configuration != nil { + for i, v := range s.Ec2Configuration { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Ec2Configuration", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -2654,6 +2671,12 @@ func (s *ComputeResource) SetDesiredvCpus(v int64) *ComputeResource { return s } +// SetEc2Configuration sets the Ec2Configuration field's value. +func (s *ComputeResource) SetEc2Configuration(v []*Ec2Configuration) *ComputeResource { + s.Ec2Configuration = v + return s +} + // SetEc2KeyPair sets the Ec2KeyPair field's value. func (s *ComputeResource) SetEc2KeyPair(v string) *ComputeResource { s.Ec2KeyPair = &v @@ -4387,6 +4410,85 @@ func (s *Device) SetPermissions(v []*string) *Device { return s } +// Provides information used to select Amazon Machine Images (AMIs) for instances +// in the compute environment. If the Ec2Configuration is not specified, the +// default is ECS_AL1. +type Ec2Configuration struct { + _ struct{} `type:"structure"` + + // The AMI ID used for instances launched in the compute environment that match + // the image type. This setting overrides the imageId set in the computeResource + // object. + ImageIdOverride *string `locationName:"imageIdOverride" min:"1" type:"string"` + + // The image type to match with the instance type to pick an AMI. If the imageIdOverride + // parameter is not specified, then a recent Amazon ECS-optimized AMI (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) + // will be used. + // + // ECS_AL2 + // + // Amazon Linux 2 (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#al2ami)− + // Default for all AWS Graviton-based instance families (for example, C6g, M6g, + // R6g, and T4g) and can be used for all non-GPU instance types. + // + // ECS_AL2_NVIDIA + // + // Amazon Linux 2 (GPU) (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#gpuami)−Default + // for all GPU instance families (for example P4 and G4) and can be used for + // all non-AWS Graviton-based instance types. + // + // ECS_AL1 + // + // Amazon Linux (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#alami)−Default + // for all non-GPU, non-AWS-Graviton instance families. Amazon Linux is reaching + // the end-of-life of standard support. For more information, see Amazon Linux + // AMI (https://aws.amazon.com/amazon-linux-ami/). + // + // ImageType is a required field + ImageType *string `locationName:"imageType" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s Ec2Configuration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Ec2Configuration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *Ec2Configuration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Ec2Configuration"} + if s.ImageIdOverride != nil && len(*s.ImageIdOverride) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ImageIdOverride", 1)) + } + if s.ImageType == nil { + invalidParams.Add(request.NewErrParamRequired("ImageType")) + } + if s.ImageType != nil && len(*s.ImageType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ImageType", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetImageIdOverride sets the ImageIdOverride field's value. +func (s *Ec2Configuration) SetImageIdOverride(v string) *Ec2Configuration { + s.ImageIdOverride = &v + return s +} + +// SetImageType sets the ImageType field's value. +func (s *Ec2Configuration) SetImageType(v string) *Ec2Configuration { + s.ImageType = &v + return s +} + // Specifies a set of conditions to be met, and an action to take (RETRY or // EXIT) if all conditions are met. type EvaluateOnExit struct { diff --git a/service/cloudformation/api.go b/service/cloudformation/api.go index 34a976c8830..498b77207fb 100644 --- a/service/cloudformation/api.go +++ b/service/cloudformation/api.go @@ -7805,9 +7805,9 @@ func (s DeleteStackSetOutput) GoString() string { } // [Service-managed permissions] The AWS Organizations accounts to which StackSets -// deploys. StackSets does not deploy stack instances to the organization master -// account, even if the master account is in your organization or in an OU in -// your organization. +// deploys. StackSets does not deploy stack instances to the organization management +// account, even if the organization management account is in your organization +// or in an OU in your organization. // // For update operations, you can specify either Accounts or OrganizationalUnitIds. // For create and delete operations, specify OrganizationalUnitIds. @@ -11637,6 +11637,9 @@ type ListTypesInput struct { // handlers, and therefore cannot actually be provisioned. ProvisioningType *string `type:"string" enum:"ProvisioningType"` + // The type of extension. + Type *string `type:"string" enum:"RegistryType"` + // The scope at which the type is visible and usable in CloudFormation operations. // // Valid values include: @@ -11702,6 +11705,12 @@ func (s *ListTypesInput) SetProvisioningType(v string) *ListTypesInput { return s } +// SetType sets the Type field's value. +func (s *ListTypesInput) SetType(v string) *ListTypesInput { + s.Type = &v + return s +} + // SetVisibility sets the Visibility field's value. func (s *ListTypesInput) SetVisibility(v string) *ListTypesInput { s.Visibility = &v @@ -11805,6 +11814,62 @@ func (s *LoggingConfig) SetLogRoleArn(v string) *LoggingConfig { return s } +// Contains information about the module from which the resource was created, +// if the resource was created from a module included in the stack template. +// +// For more information on modules, see Using modules to encapsulate and reuse +// resource configurations (AWSCloudFormation/latest/UserGuide/modules.html) +// in the CloudFormation User Guide. +type ModuleInfo struct { + _ struct{} `type:"structure"` + + // A concantenated list of the logical IDs of the module or modules containing + // the resource. Modules are listed starting with the inner-most nested module, + // and separated by /. + // + // In the following example, the resource was created from a module, moduleA, + // that is nested inside a parent module, moduleB. 
+ // + // moduleA/moduleB + // + // For more information, see Referencing resources in a module (AWSCloudFormation/latest/UserGuide/modules.html#module-ref-resources) + // in the CloudFormation User Guide. + LogicalIdHierarchy *string `type:"string"` + + // A concantenated list of the the module type or types containing the resource. + // Module types are listed starting with the inner-most nested module, and separated + // by /. + // + // In the following example, the resource was created from a module of type + // AWS::First::Example::MODULE, that is nested inside a parent module of type + // AWS::Second::Example::MODULE. + // + // AWS::First::Example::MODULE/AWS::Second::Example::MODULE + TypeHierarchy *string `type:"string"` +} + +// String returns the string representation +func (s ModuleInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModuleInfo) GoString() string { + return s.String() +} + +// SetLogicalIdHierarchy sets the LogicalIdHierarchy field's value. +func (s *ModuleInfo) SetLogicalIdHierarchy(v string) *ModuleInfo { + s.LogicalIdHierarchy = &v + return s +} + +// SetTypeHierarchy sets the TypeHierarchy field's value. +func (s *ModuleInfo) SetTypeHierarchy(v string) *ModuleInfo { + s.TypeHierarchy = &v + return s +} + // The Output data type. type Output struct { _ struct{} `type:"structure"` @@ -12438,6 +12503,10 @@ type ResourceChange struct { // The resource's logical ID, which is defined in the stack's template. LogicalResourceId *string `type:"string"` + // Contains information about the module from which the resource was created, + // if the resource was created from a module included in the stack template. + ModuleInfo *ModuleInfo `type:"structure"` + // The resource's physical ID (resource name). Resources that you are adding // don't have physical IDs because they haven't been created. PhysicalResourceId *string `type:"string"` @@ -12498,6 +12567,12 @@ func (s *ResourceChange) SetLogicalResourceId(v string) *ResourceChange { return s } +// SetModuleInfo sets the ModuleInfo field's value. +func (s *ResourceChange) SetModuleInfo(v *ModuleInfo) *ResourceChange { + s.ModuleInfo = v + return s +} + // SetPhysicalResourceId sets the PhysicalResourceId field's value. func (s *ResourceChange) SetPhysicalResourceId(v string) *ResourceChange { s.PhysicalResourceId = &v @@ -14114,6 +14189,10 @@ type StackResource struct { // LogicalResourceId is a required field LogicalResourceId *string `type:"string" required:"true"` + // Contains information about the module from which the resource was created, + // if the resource was created from a module included in the stack template. + ModuleInfo *ModuleInfo `type:"structure"` + // The name or unique identifier that corresponds to a physical instance ID // of a resource supported by AWS CloudFormation. PhysicalResourceId *string `type:"string"` @@ -14173,6 +14252,12 @@ func (s *StackResource) SetLogicalResourceId(v string) *StackResource { return s } +// SetModuleInfo sets the ModuleInfo field's value. +func (s *StackResource) SetModuleInfo(v *ModuleInfo) *StackResource { + s.ModuleInfo = v + return s +} + // SetPhysicalResourceId sets the PhysicalResourceId field's value. func (s *StackResource) SetPhysicalResourceId(v string) *StackResource { s.PhysicalResourceId = &v @@ -14243,6 +14328,10 @@ type StackResourceDetail struct { // in the AWS CloudFormation User Guide. 
Metadata *string `type:"string"` + // Contains information about the module from which the resource was created, + // if the resource was created from a module included in the stack template. + ModuleInfo *ModuleInfo `type:"structure"` + // The name or unique identifier that corresponds to a physical instance ID // of a resource supported by AWS CloudFormation. PhysicalResourceId *string `type:"string"` @@ -14309,6 +14398,12 @@ func (s *StackResourceDetail) SetMetadata(v string) *StackResourceDetail { return s } +// SetModuleInfo sets the ModuleInfo field's value. +func (s *StackResourceDetail) SetModuleInfo(v *ModuleInfo) *StackResourceDetail { + s.ModuleInfo = v + return s +} + // SetPhysicalResourceId sets the PhysicalResourceId field's value. func (s *StackResourceDetail) SetPhysicalResourceId(v string) *StackResourceDetail { s.PhysicalResourceId = &v @@ -14379,6 +14474,10 @@ type StackResourceDrift struct { // LogicalResourceId is a required field LogicalResourceId *string `type:"string" required:"true"` + // Contains information about the module from which the resource was created, + // if the resource was created from a module included in the stack template. + ModuleInfo *ModuleInfo `type:"structure"` + // The name or unique identifier that corresponds to a physical instance ID // of a resource supported by AWS CloudFormation. PhysicalResourceId *string `type:"string"` @@ -14456,6 +14555,12 @@ func (s *StackResourceDrift) SetLogicalResourceId(v string) *StackResourceDrift return s } +// SetModuleInfo sets the ModuleInfo field's value. +func (s *StackResourceDrift) SetModuleInfo(v *ModuleInfo) *StackResourceDrift { + s.ModuleInfo = v + return s +} + // SetPhysicalResourceId sets the PhysicalResourceId field's value. func (s *StackResourceDrift) SetPhysicalResourceId(v string) *StackResourceDrift { s.PhysicalResourceId = &v @@ -14621,6 +14726,10 @@ type StackResourceSummary struct { // LogicalResourceId is a required field LogicalResourceId *string `type:"string" required:"true"` + // Contains information about the module from which the resource was created, + // if the resource was created from a module included in the stack template. + ModuleInfo *ModuleInfo `type:"structure"` + // The name or unique identifier that corresponds to a physical instance ID // of the resource. PhysicalResourceId *string `type:"string"` @@ -14669,6 +14778,12 @@ func (s *StackResourceSummary) SetLogicalResourceId(v string) *StackResourceSumm return s } +// SetModuleInfo sets the ModuleInfo field's value. +func (s *StackResourceSummary) SetModuleInfo(v *ModuleInfo) *StackResourceSummary { + s.ModuleInfo = v + return s +} + // SetPhysicalResourceId sets the PhysicalResourceId field's value. func (s *StackResourceSummary) SetPhysicalResourceId(v string) *StackResourceSummary { s.PhysicalResourceId = &v @@ -17652,12 +17767,16 @@ func RegistrationStatus_Values() []string { const ( // RegistryTypeResource is a RegistryType enum value RegistryTypeResource = "RESOURCE" + + // RegistryTypeModule is a RegistryType enum value + RegistryTypeModule = "MODULE" ) // RegistryType_Values returns all elements of the RegistryType enum func RegistryType_Values() []string { return []string{ RegistryTypeResource, + RegistryTypeModule, } } diff --git a/service/cloudtrail/api.go b/service/cloudtrail/api.go index 5c1a5889e42..530b9ee5aaf 100644 --- a/service/cloudtrail/api.go +++ b/service/cloudtrail/api.go @@ -255,8 +255,9 @@ func (c *CloudTrail) CreateTrailRequest(input *CreateTrailInput) (req *request.R // valid. 
// // * KmsKeyNotFoundException -// This exception is thrown when the KMS key does not exist, or when the S3 -// bucket and the KMS key are not in the same region. +// This exception is thrown when the KMS key does not exist, when the S3 bucket +// and the KMS key are not in the same region, or when the KMS key associated +// with the SNS topic either does not exist or is not in the same region. // // * KmsKeyDisabledException // This exception is no longer in use. @@ -1535,8 +1536,8 @@ func (c *CloudTrail) LookupEventsRequest(input *LookupEventsInput) (req *request // with a maximum of 50 possible. The response includes a token that you can // use to get the next page of results. // -// The rate of lookup requests is limited to two per second per account. If -// this limit is exceeded, a throttling error occurs. +// The rate of lookup requests is limited to two per second, per account, per +// region. If this limit is exceeded, a throttling error occurs. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2443,8 +2444,9 @@ func (c *CloudTrail) UpdateTrailRequest(input *UpdateTrailInput) (req *request.R // other than the region in which the trail was created. // // * KmsKeyNotFoundException -// This exception is thrown when the KMS key does not exist, or when the S3 -// bucket and the KMS key are not in the same region. +// This exception is thrown when the KMS key does not exist, when the S3 bucket +// and the KMS key are not in the same region, or when the KMS key associated +// with the SNS topic either does not exist or is not in the same region. // // * KmsKeyDisabledException // This exception is no longer in use. @@ -2714,6 +2716,175 @@ func (s AddTagsOutput) GoString() string { return s.String() } +type AdvancedEventSelector struct { + _ struct{} `type:"structure"` + + // FieldSelectors is a required field + FieldSelectors []*AdvancedFieldSelector `min:"1" type:"list" required:"true"` + + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AdvancedEventSelector) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdvancedEventSelector) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AdvancedEventSelector) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AdvancedEventSelector"} + if s.FieldSelectors == nil { + invalidParams.Add(request.NewErrParamRequired("FieldSelectors")) + } + if s.FieldSelectors != nil && len(s.FieldSelectors) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FieldSelectors", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.FieldSelectors != nil { + for i, v := range s.FieldSelectors { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "FieldSelectors", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFieldSelectors sets the FieldSelectors field's value. 
+func (s *AdvancedEventSelector) SetFieldSelectors(v []*AdvancedFieldSelector) *AdvancedEventSelector { + s.FieldSelectors = v + return s +} + +// SetName sets the Name field's value. +func (s *AdvancedEventSelector) SetName(v string) *AdvancedEventSelector { + s.Name = &v + return s +} + +type AdvancedFieldSelector struct { + _ struct{} `type:"structure"` + + EndsWith []*string `min:"1" type:"list"` + + Equals []*string `min:"1" type:"list"` + + // Field is a required field + Field *string `min:"1" type:"string" required:"true"` + + NotEndsWith []*string `min:"1" type:"list"` + + NotEquals []*string `min:"1" type:"list"` + + NotStartsWith []*string `min:"1" type:"list"` + + StartsWith []*string `min:"1" type:"list"` +} + +// String returns the string representation +func (s AdvancedFieldSelector) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdvancedFieldSelector) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AdvancedFieldSelector) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AdvancedFieldSelector"} + if s.EndsWith != nil && len(s.EndsWith) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EndsWith", 1)) + } + if s.Equals != nil && len(s.Equals) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Equals", 1)) + } + if s.Field == nil { + invalidParams.Add(request.NewErrParamRequired("Field")) + } + if s.Field != nil && len(*s.Field) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Field", 1)) + } + if s.NotEndsWith != nil && len(s.NotEndsWith) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NotEndsWith", 1)) + } + if s.NotEquals != nil && len(s.NotEquals) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NotEquals", 1)) + } + if s.NotStartsWith != nil && len(s.NotStartsWith) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NotStartsWith", 1)) + } + if s.StartsWith != nil && len(s.StartsWith) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StartsWith", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEndsWith sets the EndsWith field's value. +func (s *AdvancedFieldSelector) SetEndsWith(v []*string) *AdvancedFieldSelector { + s.EndsWith = v + return s +} + +// SetEquals sets the Equals field's value. +func (s *AdvancedFieldSelector) SetEquals(v []*string) *AdvancedFieldSelector { + s.Equals = v + return s +} + +// SetField sets the Field field's value. +func (s *AdvancedFieldSelector) SetField(v string) *AdvancedFieldSelector { + s.Field = &v + return s +} + +// SetNotEndsWith sets the NotEndsWith field's value. +func (s *AdvancedFieldSelector) SetNotEndsWith(v []*string) *AdvancedFieldSelector { + s.NotEndsWith = v + return s +} + +// SetNotEquals sets the NotEquals field's value. +func (s *AdvancedFieldSelector) SetNotEquals(v []*string) *AdvancedFieldSelector { + s.NotEquals = v + return s +} + +// SetNotStartsWith sets the NotStartsWith field's value. +func (s *AdvancedFieldSelector) SetNotStartsWith(v []*string) *AdvancedFieldSelector { + s.NotStartsWith = v + return s +} + +// SetStartsWith sets the StartsWith field's value. +func (s *AdvancedFieldSelector) SetStartsWith(v []*string) *AdvancedFieldSelector { + s.StartsWith = v + return s +} + // Cannot set a CloudWatch Logs delivery for this region. 
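A usage sketch (editorial, not part of the generated diff) for the new advanced event selectors. With this release, PutEventSelectors no longer requires classic EventSelectors, so a trail can be configured with advanced selectors alone. The trail name "management-trail" is hypothetical; "eventCategory" and "resources.type" are standard advanced-selector field names.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudtrail"
)

func main() {
	svc := cloudtrail.New(session.Must(session.NewSession()))

	// Log S3 object-level data events and nothing else.
	out, err := svc.PutEventSelectors(&cloudtrail.PutEventSelectorsInput{
		TrailName: aws.String("management-trail"), // hypothetical trail name
		AdvancedEventSelectors: []*cloudtrail.AdvancedEventSelector{{
			Name: aws.String("Log S3 object-level data events"),
			FieldSelectors: []*cloudtrail.AdvancedFieldSelector{
				{Field: aws.String("eventCategory"), Equals: []*string{aws.String("Data")}},
				{Field: aws.String("resources.type"), Equals: []*string{aws.String("AWS::S3::Object")}},
			},
		}},
	})
	if err != nil {
		fmt.Println("PutEventSelectors failed:", err)
		return
	}
	fmt.Println(out)
}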
type CloudWatchLogsDeliveryUnavailableException struct { _ struct{} `type:"structure"` @@ -3504,6 +3675,11 @@ type EventSelector struct { // in the AWS CloudTrail User Guide. // // By default, the value is true. + // + // The first copy of management events is free. You are charged for additional + // copies of management events that you are logging on any subsequent trail + // in the same region. For more information about CloudTrail pricing, see AWS + // CloudTrail Pricing (http://aws.amazon.com/cloudtrail/pricing/). IncludeManagementEvents *bool `type:"boolean"` // Specify if you want your trail to log read-only events, write-only events, @@ -3606,6 +3782,8 @@ func (s *GetEventSelectorsInput) SetTrailName(v string) *GetEventSelectorsInput type GetEventSelectorsOutput struct { _ struct{} `type:"structure"` + AdvancedEventSelectors []*AdvancedEventSelector `type:"list"` + // The event selectors that are configured for the trail. EventSelectors []*EventSelector `type:"list"` @@ -3623,6 +3801,12 @@ func (s GetEventSelectorsOutput) GoString() string { return s.String() } +// SetAdvancedEventSelectors sets the AdvancedEventSelectors field's value. +func (s *GetEventSelectorsOutput) SetAdvancedEventSelectors(v []*AdvancedEventSelector) *GetEventSelectorsOutput { + s.AdvancedEventSelectors = v + return s +} + // SetEventSelectors sets the EventSelectors field's value. func (s *GetEventSelectorsOutput) SetEventSelectors(v []*EventSelector) *GetEventSelectorsOutput { s.EventSelectors = v @@ -5496,8 +5680,9 @@ func (s *KmsKeyDisabledException) RequestID() string { return s.RespMetadata.RequestID } -// This exception is thrown when the KMS key does not exist, or when the S3 -// bucket and the KMS key are not in the same region. +// This exception is thrown when the KMS key does not exist, when the S3 bucket +// and the KMS key are not in the same region, or when the KMS key associated +// with the SNS topic either does not exist or is not in the same region. type KmsKeyNotFoundException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -6323,11 +6508,11 @@ func (s *PublicKey) SetValue(v []byte) *PublicKey { type PutEventSelectorsInput struct { _ struct{} `type:"structure"` + AdvancedEventSelectors []*AdvancedEventSelector `type:"list"` + // Specifies the settings for your event selectors. You can configure up to // five event selectors for a trail. - // - // EventSelectors is a required field - EventSelectors []*EventSelector `type:"list" required:"true"` + EventSelectors []*EventSelector `type:"list"` // Specifies the name of the trail or trail ARN. If you specify a trail name, // the string must meet the following requirements: @@ -6365,12 +6550,19 @@ func (s PutEventSelectorsInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *PutEventSelectorsInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "PutEventSelectorsInput"} - if s.EventSelectors == nil { - invalidParams.Add(request.NewErrParamRequired("EventSelectors")) - } if s.TrailName == nil { invalidParams.Add(request.NewErrParamRequired("TrailName")) } + if s.AdvancedEventSelectors != nil { + for i, v := range s.AdvancedEventSelectors { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AdvancedEventSelectors", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -6378,6 +6570,12 @@ func (s *PutEventSelectorsInput) Validate() error { return nil } +// SetAdvancedEventSelectors sets the AdvancedEventSelectors field's value. +func (s *PutEventSelectorsInput) SetAdvancedEventSelectors(v []*AdvancedEventSelector) *PutEventSelectorsInput { + s.AdvancedEventSelectors = v + return s +} + // SetEventSelectors sets the EventSelectors field's value. func (s *PutEventSelectorsInput) SetEventSelectors(v []*EventSelector) *PutEventSelectorsInput { s.EventSelectors = v @@ -6393,6 +6591,8 @@ func (s *PutEventSelectorsInput) SetTrailName(v string) *PutEventSelectorsInput type PutEventSelectorsOutput struct { _ struct{} `type:"structure"` + AdvancedEventSelectors []*AdvancedEventSelector `type:"list"` + // Specifies the event selectors configured for your trail. EventSelectors []*EventSelector `type:"list"` @@ -6413,6 +6613,12 @@ func (s PutEventSelectorsOutput) GoString() string { return s.String() } +// SetAdvancedEventSelectors sets the AdvancedEventSelectors field's value. +func (s *PutEventSelectorsOutput) SetAdvancedEventSelectors(v []*AdvancedEventSelector) *PutEventSelectorsOutput { + s.AdvancedEventSelectors = v + return s +} + // SetEventSelectors sets the EventSelectors field's value. func (s *PutEventSelectorsOutput) SetEventSelectors(v []*EventSelector) *PutEventSelectorsOutput { s.EventSelectors = v diff --git a/service/cloudtrail/errors.go b/service/cloudtrail/errors.go index 5feef21ae67..34b3cb28640 100644 --- a/service/cloudtrail/errors.go +++ b/service/cloudtrail/errors.go @@ -230,8 +230,9 @@ const ( // ErrCodeKmsKeyNotFoundException for service response error code // "KmsKeyNotFoundException". // - // This exception is thrown when the KMS key does not exist, or when the S3 - // bucket and the KMS key are not in the same region. + // This exception is thrown when the KMS key does not exist, when the S3 bucket + // and the KMS key are not in the same region, or when the KMS key associated + // with the SNS topic either does not exist or is not in the same region. ErrCodeKmsKeyNotFoundException = "KmsKeyNotFoundException" // ErrCodeMaximumNumberOfTrailsExceededException for service response error code diff --git a/service/codebuild/api.go b/service/codebuild/api.go index a336d52067e..ac056599539 100644 --- a/service/codebuild/api.go +++ b/service/codebuild/api.go @@ -1606,6 +1606,86 @@ func (c *CodeBuild) DescribeTestCasesPagesWithContext(ctx aws.Context, input *De return p.Err() } +const opGetReportGroupTrend = "GetReportGroupTrend" + +// GetReportGroupTrendRequest generates a "aws/request.Request" representing the +// client's request for the GetReportGroupTrend operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See GetReportGroupTrend for more information on using the GetReportGroupTrend +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetReportGroupTrendRequest method. +// req, resp := client.GetReportGroupTrendRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/GetReportGroupTrend +func (c *CodeBuild) GetReportGroupTrendRequest(input *GetReportGroupTrendInput) (req *request.Request, output *GetReportGroupTrendOutput) { + op := &request.Operation{ + Name: opGetReportGroupTrend, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetReportGroupTrendInput{} + } + + output = &GetReportGroupTrendOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetReportGroupTrend API operation for AWS CodeBuild. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeBuild's +// API operation GetReportGroupTrend for usage and error information. +// +// Returned Error Types: +// * InvalidInputException +// The input value that was provided is not valid. +// +// * ResourceNotFoundException +// The specified AWS resource cannot be found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/GetReportGroupTrend +func (c *CodeBuild) GetReportGroupTrend(input *GetReportGroupTrendInput) (*GetReportGroupTrendOutput, error) { + req, out := c.GetReportGroupTrendRequest(input) + return out, req.Send() +} + +// GetReportGroupTrendWithContext is the same as GetReportGroupTrend with the addition of +// the ability to pass a context and additional request options. +// +// See GetReportGroupTrend for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeBuild) GetReportGroupTrendWithContext(ctx aws.Context, input *GetReportGroupTrendInput, opts ...request.Option) (*GetReportGroupTrendOutput, error) { + req, out := c.GetReportGroupTrendRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opGetResourcePolicy = "GetResourcePolicy" // GetResourcePolicyRequest generates a "aws/request.Request" representing the @@ -7875,6 +7955,98 @@ func (s *ExportedEnvironmentVariable) SetValue(v string) *ExportedEnvironmentVar return s } +type GetReportGroupTrendInput struct { + _ struct{} `type:"structure"` + + NumOfReports *int64 `locationName:"numOfReports" min:"1" type:"integer"` + + // ReportGroupArn is a required field + ReportGroupArn *string `locationName:"reportGroupArn" min:"1" type:"string" required:"true"` + + // TrendField is a required field + TrendField *string `locationName:"trendField" type:"string" required:"true" enum:"ReportGroupTrendFieldType"` +} + +// String returns the string representation +func (s GetReportGroupTrendInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetReportGroupTrendInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetReportGroupTrendInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetReportGroupTrendInput"} + if s.NumOfReports != nil && *s.NumOfReports < 1 { + invalidParams.Add(request.NewErrParamMinValue("NumOfReports", 1)) + } + if s.ReportGroupArn == nil { + invalidParams.Add(request.NewErrParamRequired("ReportGroupArn")) + } + if s.ReportGroupArn != nil && len(*s.ReportGroupArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ReportGroupArn", 1)) + } + if s.TrendField == nil { + invalidParams.Add(request.NewErrParamRequired("TrendField")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNumOfReports sets the NumOfReports field's value. +func (s *GetReportGroupTrendInput) SetNumOfReports(v int64) *GetReportGroupTrendInput { + s.NumOfReports = &v + return s +} + +// SetReportGroupArn sets the ReportGroupArn field's value. +func (s *GetReportGroupTrendInput) SetReportGroupArn(v string) *GetReportGroupTrendInput { + s.ReportGroupArn = &v + return s +} + +// SetTrendField sets the TrendField field's value. +func (s *GetReportGroupTrendInput) SetTrendField(v string) *GetReportGroupTrendInput { + s.TrendField = &v + return s +} + +type GetReportGroupTrendOutput struct { + _ struct{} `type:"structure"` + + RawData []*ReportWithRawData `locationName:"rawData" type:"list"` + + Stats *ReportGroupTrendStats `locationName:"stats" type:"structure"` +} + +// String returns the string representation +func (s GetReportGroupTrendOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetReportGroupTrendOutput) GoString() string { + return s.String() +} + +// SetRawData sets the RawData field's value. +func (s *GetReportGroupTrendOutput) SetRawData(v []*ReportWithRawData) *GetReportGroupTrendOutput { + s.RawData = v + return s +} + +// SetStats sets the Stats field's value. 
+func (s *GetReportGroupTrendOutput) SetStats(v *ReportGroupTrendStats) *GetReportGroupTrendOutput {
+	s.Stats = v
+	return s
+}
+
 type GetResourcePolicyInput struct {
 	_ struct{} `type:"structure"`
 
@@ -11397,6 +11569,74 @@ func (s *ReportGroup) SetType(v string) *ReportGroup {
 	return s
 }
 
+type ReportGroupTrendStats struct {
+	_ struct{} `type:"structure"`
+
+	Average *string `locationName:"average" type:"string"`
+
+	Max *string `locationName:"max" type:"string"`
+
+	Min *string `locationName:"min" type:"string"`
+}
+
+// String returns the string representation
+func (s ReportGroupTrendStats) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReportGroupTrendStats) GoString() string {
+	return s.String()
+}
+
+// SetAverage sets the Average field's value.
+func (s *ReportGroupTrendStats) SetAverage(v string) *ReportGroupTrendStats {
+	s.Average = &v
+	return s
+}
+
+// SetMax sets the Max field's value.
+func (s *ReportGroupTrendStats) SetMax(v string) *ReportGroupTrendStats {
+	s.Max = &v
+	return s
+}
+
+// SetMin sets the Min field's value.
+func (s *ReportGroupTrendStats) SetMin(v string) *ReportGroupTrendStats {
+	s.Min = &v
+	return s
+}
+
+type ReportWithRawData struct {
+	_ struct{} `type:"structure"`
+
+	Data *string `locationName:"data" type:"string"`
+
+	ReportArn *string `locationName:"reportArn" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ReportWithRawData) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReportWithRawData) GoString() string {
+	return s.String()
+}
+
+// SetData sets the Data field's value.
+func (s *ReportWithRawData) SetData(v string) *ReportWithRawData {
+	s.Data = &v
+	return s
+}
+
+// SetReportArn sets the ReportArn field's value.
+func (s *ReportWithRawData) SetReportArn(v string) *ReportWithRawData {
+	s.ReportArn = &v
+	return s
+}
+
 // Represents a resolved build artifact. A resolved artifact is an artifact that
 // is built and deployed to the destination, such as Amazon Simple Storage Service
 // (Amazon S3).
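A usage sketch (editorial, not part of the generated diff): requesting a pass-rate trend across recent test reports with the new GetReportGroupTrend operation. The report group ARN below is hypothetical.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codebuild"
)

func main() {
	svc := codebuild.New(session.Must(session.NewSession()))

	out, err := svc.GetReportGroupTrend(&codebuild.GetReportGroupTrendInput{
		// Hypothetical report group ARN.
		ReportGroupArn: aws.String("arn:aws:codebuild:us-east-1:123456789012:report-group/example-reports"),
		TrendField:     aws.String(codebuild.ReportGroupTrendFieldTypePassRate),
		NumOfReports:   aws.Int64(25),
	})
	if err != nil {
		fmt.Println("GetReportGroupTrend failed:", err)
		return
	}
	if out.Stats != nil {
		fmt.Printf("pass rate min=%s avg=%s max=%s\n",
			aws.StringValue(out.Stats.Min),
			aws.StringValue(out.Stats.Average),
			aws.StringValue(out.Stats.Max))
	}
	for _, r := range out.RawData {
		fmt.Println(aws.StringValue(r.ReportArn), aws.StringValue(r.Data))
	}
}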
@@ -14672,6 +14912,50 @@ func ReportGroupStatusType_Values() []string { } } +const ( + // ReportGroupTrendFieldTypePassRate is a ReportGroupTrendFieldType enum value + ReportGroupTrendFieldTypePassRate = "PASS_RATE" + + // ReportGroupTrendFieldTypeDuration is a ReportGroupTrendFieldType enum value + ReportGroupTrendFieldTypeDuration = "DURATION" + + // ReportGroupTrendFieldTypeTotal is a ReportGroupTrendFieldType enum value + ReportGroupTrendFieldTypeTotal = "TOTAL" + + // ReportGroupTrendFieldTypeLineCoverage is a ReportGroupTrendFieldType enum value + ReportGroupTrendFieldTypeLineCoverage = "LINE_COVERAGE" + + // ReportGroupTrendFieldTypeLinesCovered is a ReportGroupTrendFieldType enum value + ReportGroupTrendFieldTypeLinesCovered = "LINES_COVERED" + + // ReportGroupTrendFieldTypeLinesMissed is a ReportGroupTrendFieldType enum value + ReportGroupTrendFieldTypeLinesMissed = "LINES_MISSED" + + // ReportGroupTrendFieldTypeBranchCoverage is a ReportGroupTrendFieldType enum value + ReportGroupTrendFieldTypeBranchCoverage = "BRANCH_COVERAGE" + + // ReportGroupTrendFieldTypeBranchesCovered is a ReportGroupTrendFieldType enum value + ReportGroupTrendFieldTypeBranchesCovered = "BRANCHES_COVERED" + + // ReportGroupTrendFieldTypeBranchesMissed is a ReportGroupTrendFieldType enum value + ReportGroupTrendFieldTypeBranchesMissed = "BRANCHES_MISSED" +) + +// ReportGroupTrendFieldType_Values returns all elements of the ReportGroupTrendFieldType enum +func ReportGroupTrendFieldType_Values() []string { + return []string{ + ReportGroupTrendFieldTypePassRate, + ReportGroupTrendFieldTypeDuration, + ReportGroupTrendFieldTypeTotal, + ReportGroupTrendFieldTypeLineCoverage, + ReportGroupTrendFieldTypeLinesCovered, + ReportGroupTrendFieldTypeLinesMissed, + ReportGroupTrendFieldTypeBranchCoverage, + ReportGroupTrendFieldTypeBranchesCovered, + ReportGroupTrendFieldTypeBranchesMissed, + } +} + const ( // ReportPackagingTypeZip is a ReportPackagingType enum value ReportPackagingTypeZip = "ZIP" diff --git a/service/codebuild/codebuildiface/interface.go b/service/codebuild/codebuildiface/interface.go index 44883edd43e..5cd1bf39e34 100644 --- a/service/codebuild/codebuildiface/interface.go +++ b/service/codebuild/codebuildiface/interface.go @@ -138,6 +138,10 @@ type CodeBuildAPI interface { DescribeTestCasesPages(*codebuild.DescribeTestCasesInput, func(*codebuild.DescribeTestCasesOutput, bool) bool) error DescribeTestCasesPagesWithContext(aws.Context, *codebuild.DescribeTestCasesInput, func(*codebuild.DescribeTestCasesOutput, bool) bool, ...request.Option) error + GetReportGroupTrend(*codebuild.GetReportGroupTrendInput) (*codebuild.GetReportGroupTrendOutput, error) + GetReportGroupTrendWithContext(aws.Context, *codebuild.GetReportGroupTrendInput, ...request.Option) (*codebuild.GetReportGroupTrendOutput, error) + GetReportGroupTrendRequest(*codebuild.GetReportGroupTrendInput) (*request.Request, *codebuild.GetReportGroupTrendOutput) + GetResourcePolicy(*codebuild.GetResourcePolicyInput) (*codebuild.GetResourcePolicyOutput, error) GetResourcePolicyWithContext(aws.Context, *codebuild.GetResourcePolicyInput, ...request.Option) (*codebuild.GetResourcePolicyOutput, error) GetResourcePolicyRequest(*codebuild.GetResourcePolicyInput) (*request.Request, *codebuild.GetResourcePolicyOutput) diff --git a/service/cognitoidentityprovider/api.go b/service/cognitoidentityprovider/api.go index 86c0162ec51..4a0e782d835 100644 --- a/service/cognitoidentityprovider/api.go +++ b/service/cognitoidentityprovider/api.go @@ -9217,7 +9217,11 @@ 
func (c *CognitoIdentityProvider) SetUserMFAPreferenceRequest(input *SetUserMFAP // be set as preferred. The preferred MFA factor will be used to authenticate // a user if multiple factors are enabled. If multiple options are enabled and // no preference is set, a challenge to choose an MFA option will be returned -// during sign in. +// during sign in. If an MFA type is enabled for a user, the user will be prompted +// for MFA during all sign in attempts, unless device tracking is turned on +// and the device has been trusted. If you would like MFA to be applied selectively +// based on the assessed risk level of sign in attempts, disable MFA for users +// and turn on Adaptive Authentication for the user pool. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -17208,10 +17212,14 @@ type CreateUserPoolInput struct { // The email configuration. EmailConfiguration *EmailConfigurationType `type:"structure"` - // A string representing the email verification message. + // A string representing the email verification message. EmailVerificationMessage + // is allowed only if EmailSendingAccount (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_EmailConfigurationType.html#CognitoUserPools-Type-EmailConfigurationType-EmailSendingAccount) + // is DEVELOPER. EmailVerificationMessage *string `min:"6" type:"string"` - // A string representing the email verification subject. + // A string representing the email verification subject. EmailVerificationSubject + // is allowed only if EmailSendingAccount (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_EmailConfigurationType.html#CognitoUserPools-Type-EmailConfigurationType-EmailSendingAccount) + // is DEVELOPER. EmailVerificationSubject *string `min:"1" type:"string"` // The Lambda trigger configuration information for the new user pool. @@ -17566,6 +17574,124 @@ func (s *CustomDomainConfigType) SetCertificateArn(v string) *CustomDomainConfig return s } +// A custom email sender Lambda configuration type. +type CustomEmailLambdaVersionConfigType struct { + _ struct{} `type:"structure"` + + // The Lambda Amazon Resource Name of the Lambda function that Amazon Cognito + // triggers to send email notifications to users. + // + // LambdaArn is a required field + LambdaArn *string `min:"20" type:"string" required:"true"` + + // The Lambda version represents the signature of the "request" attribute in + // the "event" information Amazon Cognito passes to your custom email Lambda + // function. The only supported value is V1_0. + // + // LambdaVersion is a required field + LambdaVersion *string `type:"string" required:"true" enum:"CustomEmailSenderLambdaVersionType"` +} + +// String returns the string representation +func (s CustomEmailLambdaVersionConfigType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomEmailLambdaVersionConfigType) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CustomEmailLambdaVersionConfigType) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CustomEmailLambdaVersionConfigType"} + if s.LambdaArn == nil { + invalidParams.Add(request.NewErrParamRequired("LambdaArn")) + } + if s.LambdaArn != nil && len(*s.LambdaArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("LambdaArn", 20)) + } + if s.LambdaVersion == nil { + invalidParams.Add(request.NewErrParamRequired("LambdaVersion")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLambdaArn sets the LambdaArn field's value. +func (s *CustomEmailLambdaVersionConfigType) SetLambdaArn(v string) *CustomEmailLambdaVersionConfigType { + s.LambdaArn = &v + return s +} + +// SetLambdaVersion sets the LambdaVersion field's value. +func (s *CustomEmailLambdaVersionConfigType) SetLambdaVersion(v string) *CustomEmailLambdaVersionConfigType { + s.LambdaVersion = &v + return s +} + +// A custom SMS sender Lambda configuration type. +type CustomSMSLambdaVersionConfigType struct { + _ struct{} `type:"structure"` + + // The Lambda Amazon Resource Name of the Lambda function that Amazon Cognito + // triggers to send SMS notifications to users. + // + // LambdaArn is a required field + LambdaArn *string `min:"20" type:"string" required:"true"` + + // The Lambda version represents the signature of the "request" attribute in + // the "event" information Amazon Cognito passes to your custom SMS Lambda function. + // The only supported value is V1_0. + // + // LambdaVersion is a required field + LambdaVersion *string `type:"string" required:"true" enum:"CustomSMSSenderLambdaVersionType"` +} + +// String returns the string representation +func (s CustomSMSLambdaVersionConfigType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomSMSLambdaVersionConfigType) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CustomSMSLambdaVersionConfigType) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CustomSMSLambdaVersionConfigType"} + if s.LambdaArn == nil { + invalidParams.Add(request.NewErrParamRequired("LambdaArn")) + } + if s.LambdaArn != nil && len(*s.LambdaArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("LambdaArn", 20)) + } + if s.LambdaVersion == nil { + invalidParams.Add(request.NewErrParamRequired("LambdaVersion")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLambdaArn sets the LambdaArn field's value. +func (s *CustomSMSLambdaVersionConfigType) SetLambdaArn(v string) *CustomSMSLambdaVersionConfigType { + s.LambdaArn = &v + return s +} + +// SetLambdaVersion sets the LambdaVersion field's value. +func (s *CustomSMSLambdaVersionConfigType) SetLambdaVersion(v string) *CustomSMSLambdaVersionConfigType { + s.LambdaVersion = &v + return s +} + type DeleteGroupInput struct { _ struct{} `type:"structure"` @@ -18922,6 +19048,10 @@ func (s *DuplicateProviderException) RequestID() string { } // The email configuration type. +// +// Amazon Cognito has specific regions for use with Amazon SES. For more information +// on the supported regions, see Email Settings for Amazon Cognito User Pools +// (https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-email.html). 
 type EmailConfigurationType struct {
 	_ struct{} `type:"structure"`
 
@@ -18962,6 +19092,27 @@ type EmailConfigurationType struct {
 	// the FROM address, provide the ARN of an Amazon SES verified email address
 	// for the SourceArn parameter.
 	//
+	// If EmailSendingAccount is COGNITO_DEFAULT, the following parameters aren't
+	// allowed:
+	//
+	//    * EmailVerificationMessage
+	//
+	//    * EmailVerificationSubject
+	//
+	//    * InviteMessageTemplate.EmailMessage
+	//
+	//    * InviteMessageTemplate.EmailSubject
+	//
+	//    * VerificationMessageTemplate.EmailMessage
+	//
+	//    * VerificationMessageTemplate.EmailMessageByLink
+	//
+	//    * VerificationMessageTemplate.EmailSubject
+	//
+	//    * VerificationMessageTemplate.EmailSubjectByLink
+	//
+	// To use these parameters, EmailSendingAccount must be DEVELOPER.
+	//
 	// DEVELOPER
 	//
 	// When Amazon Cognito emails your users, it uses your Amazon SES configuration.
@@ -21479,12 +21630,23 @@ type LambdaConfigType struct {
 	// Creates an authentication challenge.
 	CreateAuthChallenge *string `min:"20" type:"string"`
 
+	// A custom email sender AWS Lambda trigger.
+	CustomEmailSender *CustomEmailLambdaVersionConfigType `type:"structure"`
+
 	// A custom Message AWS Lambda trigger.
 	CustomMessage *string `min:"20" type:"string"`
 
+	// A custom SMS sender AWS Lambda trigger.
+	CustomSMSSender *CustomSMSLambdaVersionConfigType `type:"structure"`
+
 	// Defines the authentication challenge.
 	DefineAuthChallenge *string `min:"20" type:"string"`
 
+	// The Amazon Resource Name of a Key Management Service customer master key
+	// (/kms/latest/developerguide/concepts.html#master_keys). Amazon Cognito uses
+	// the key to encrypt codes and temporary passwords sent to CustomEmailSender
+	// and CustomSMSSender.
+	KMSKeyID *string `min:"20" type:"string"`
+
 	// A post-authentication AWS Lambda trigger.
 	PostAuthentication *string `min:"20" type:"string"`
 
@@ -21529,6 +21691,9 @@ func (s *LambdaConfigType) Validate() error {
 	if s.DefineAuthChallenge != nil && len(*s.DefineAuthChallenge) < 20 {
 		invalidParams.Add(request.NewErrParamMinLen("DefineAuthChallenge", 20))
 	}
+	if s.KMSKeyID != nil && len(*s.KMSKeyID) < 20 {
+		invalidParams.Add(request.NewErrParamMinLen("KMSKeyID", 20))
+	}
 	if s.PostAuthentication != nil && len(*s.PostAuthentication) < 20 {
 		invalidParams.Add(request.NewErrParamMinLen("PostAuthentication", 20))
 	}
@@ -21550,6 +21715,16 @@ func (s *LambdaConfigType) Validate() error {
 	if s.VerifyAuthChallengeResponse != nil && len(*s.VerifyAuthChallengeResponse) < 20 {
 		invalidParams.Add(request.NewErrParamMinLen("VerifyAuthChallengeResponse", 20))
 	}
+	if s.CustomEmailSender != nil {
+		if err := s.CustomEmailSender.Validate(); err != nil {
+			invalidParams.AddNested("CustomEmailSender", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.CustomSMSSender != nil {
+		if err := s.CustomSMSSender.Validate(); err != nil {
+			invalidParams.AddNested("CustomSMSSender", err.(request.ErrInvalidParams))
+		}
+	}
 
 	if invalidParams.Len() > 0 {
 		return invalidParams
@@ -21563,18 +21738,36 @@ func (s *LambdaConfigType) SetCreateAuthChallenge(v string) *LambdaConfigType {
 	return s
 }
 
+// SetCustomEmailSender sets the CustomEmailSender field's value.
+func (s *LambdaConfigType) SetCustomEmailSender(v *CustomEmailLambdaVersionConfigType) *LambdaConfigType {
+	s.CustomEmailSender = v
+	return s
+}
+
 // SetCustomMessage sets the CustomMessage field's value.
 func (s *LambdaConfigType) SetCustomMessage(v string) *LambdaConfigType {
 	s.CustomMessage = &v
 	return s
 }
 
+// SetCustomSMSSender sets the CustomSMSSender field's value.
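A usage sketch (editorial, not part of the generated diff): wiring the new custom sender triggers and the KMSKeyID field into a user pool via UpdateUserPool. The pool ID, function ARNs, and KMS key ARN are all hypothetical.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cognitoidentityprovider"
)

func main() {
	svc := cognitoidentityprovider.New(session.Must(session.NewSession()))

	_, err := svc.UpdateUserPool(&cognitoidentityprovider.UpdateUserPoolInput{
		UserPoolId: aws.String("us-east-1_EXAMPLE"), // hypothetical pool ID
		LambdaConfig: &cognitoidentityprovider.LambdaConfigType{
			CustomEmailSender: &cognitoidentityprovider.CustomEmailLambdaVersionConfigType{
				// Hypothetical function ARN.
				LambdaArn:     aws.String("arn:aws:lambda:us-east-1:123456789012:function:email-sender"),
				LambdaVersion: aws.String(cognitoidentityprovider.CustomEmailSenderLambdaVersionTypeV10),
			},
			CustomSMSSender: &cognitoidentityprovider.CustomSMSLambdaVersionConfigType{
				// Hypothetical function ARN.
				LambdaArn:     aws.String("arn:aws:lambda:us-east-1:123456789012:function:sms-sender"),
				LambdaVersion: aws.String(cognitoidentityprovider.CustomSMSSenderLambdaVersionTypeV10),
			},
			// Key Cognito uses to encrypt the codes and temporary passwords it
			// hands to the custom sender functions (hypothetical key ARN).
			KMSKeyID: aws.String("arn:aws:kms:us-east-1:123456789012:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
		},
	})
	if err != nil {
		fmt.Println("UpdateUserPool failed:", err)
	}
}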
+func (s *LambdaConfigType) SetCustomSMSSender(v *CustomSMSLambdaVersionConfigType) *LambdaConfigType { + s.CustomSMSSender = v + return s +} + // SetDefineAuthChallenge sets the DefineAuthChallenge field's value. func (s *LambdaConfigType) SetDefineAuthChallenge(v string) *LambdaConfigType { s.DefineAuthChallenge = &v return s } +// SetKMSKeyID sets the KMSKeyID field's value. +func (s *LambdaConfigType) SetKMSKeyID(v string) *LambdaConfigType { + s.KMSKeyID = &v + return s +} + // SetPostAuthentication sets the PostAuthentication field's value. func (s *LambdaConfigType) SetPostAuthentication(v string) *LambdaConfigType { s.PostAuthentication = &v @@ -22805,10 +22998,14 @@ func (s *MFAOptionType) SetDeliveryMedium(v string) *MFAOptionType { type MessageTemplateType struct { _ struct{} `type:"structure"` - // The message template for email messages. + // The message template for email messages. EmailMessage is allowed only if + // EmailSendingAccount (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_EmailConfigurationType.html#CognitoUserPools-Type-EmailConfigurationType-EmailSendingAccount) + // is DEVELOPER. EmailMessage *string `min:"6" type:"string"` - // The subject line for email messages. + // The subject line for email messages. EmailSubject is allowed only if EmailSendingAccount + // (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_EmailConfigurationType.html#CognitoUserPools-Type-EmailConfigurationType-EmailSendingAccount) + // is DEVELOPER. EmailSubject *string `min:"1" type:"string"` // The message template for SMS messages. @@ -24179,11 +24376,19 @@ func (s *RiskExceptionConfigurationType) SetSkippedIPRangeList(v []*string) *Ris return s } -// The type used for enabling SMS MFA at the user level. +// The type used for enabling SMS MFA at the user level. Phone numbers don't +// need to be verified to be used for SMS MFA. If an MFA type is enabled for +// a user, the user will be prompted for MFA during all sign in attempts, unless +// device tracking is turned on and the device has been trusted. If you would +// like MFA to be applied selectively based on the assessed risk level of sign +// in attempts, disable MFA for users and turn on Adaptive Authentication for +// the user pool. type SMSMfaSettingsType struct { _ struct{} `type:"structure"` - // Specifies whether SMS text message MFA is enabled. + // Specifies whether SMS text message MFA is enabled. If an MFA type is enabled + // for a user, the user will be prompted for MFA during all sign in attempts, + // unless device tracking is turned on and the device has been trusted. Enabled *bool `type:"boolean"` // Specifies whether SMS is the preferred MFA method. @@ -25125,7 +25330,8 @@ type SmsConfigurationType struct { // The Amazon Resource Name (ARN) of the Amazon Simple Notification Service // (SNS) caller. This is the ARN of the IAM role in your AWS account which Cognito - // will use to send SMS messages. + // will use to send SMS messages. SMS messages are subject to a spending limit + // (https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-email-phone-verification.html). // // SnsCallerArn is a required field SnsCallerArn *string `min:"20" type:"string" required:"true"` @@ -25304,11 +25510,18 @@ func (s *SoftwareTokenMfaConfigType) SetEnabled(v bool) *SoftwareTokenMfaConfigT return s } -// The type used for enabling software token MFA at the user level. +// The type used for enabling software token MFA at the user level. 
If an MFA
+// type is enabled for a user, the user will be prompted for MFA during all
+// sign in attempts, unless device tracking is turned on and the device has
+// been trusted. If you would like MFA to be applied selectively based on the
+// assessed risk level of sign in attempts, disable MFA for users and turn on
+// Adaptive Authentication for the user pool.
 type SoftwareTokenMfaSettingsType struct {
 	_ struct{} `type:"structure"`
 
-	// Specifies whether software token MFA is enabled.
+	// Specifies whether software token MFA is enabled. If an MFA type is enabled
+	// for a user, the user will be prompted for MFA during all sign in attempts,
+	// unless device tracking is turned on and the device has been trusted.
 	Enabled *bool `type:"boolean"`
 
 	// Specifies whether software token MFA is the preferred MFA method.
@@ -29043,17 +29256,25 @@ type VerificationMessageTemplateType struct {
 	// The default email option.
 	DefaultEmailOption *string `type:"string" enum:"DefaultEmailOptionType"`
 
-	// The email message template.
+	// The email message template. EmailMessage is allowed only if EmailSendingAccount
+	// (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_EmailConfigurationType.html#CognitoUserPools-Type-EmailConfigurationType-EmailSendingAccount)
+	// is DEVELOPER.
 	EmailMessage *string `min:"6" type:"string"`
 
-	// The email message template for sending a confirmation link to the user.
+	// The email message template for sending a confirmation link to the user. EmailMessageByLink
+	// is allowed only if EmailSendingAccount (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_EmailConfigurationType.html#CognitoUserPools-Type-EmailConfigurationType-EmailSendingAccount)
+	// is DEVELOPER.
 	EmailMessageByLink *string `min:"6" type:"string"`
 
-	// The subject line for the email message template.
+	// The subject line for the email message template. EmailSubject is allowed
+	// only if EmailSendingAccount (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_EmailConfigurationType.html#CognitoUserPools-Type-EmailConfigurationType-EmailSendingAccount)
+	// is DEVELOPER.
 	EmailSubject *string `min:"1" type:"string"`
 
 	// The subject line for the email message template for sending a confirmation
-	// link to the user.
+	// link to the user. EmailSubjectByLink is allowed only if EmailSendingAccount
+	// (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_EmailConfigurationType.html#CognitoUserPools-Type-EmailConfigurationType-EmailSendingAccount)
+	// is DEVELOPER.
 	EmailSubjectByLink *string `min:"1" type:"string"`
 
 	// The SMS message template.
@@ -29546,6 +29767,30 @@ func CompromisedCredentialsEventActionType_Values() []string { } } +const ( + // CustomEmailSenderLambdaVersionTypeV10 is a CustomEmailSenderLambdaVersionType enum value + CustomEmailSenderLambdaVersionTypeV10 = "V1_0" +) + +// CustomEmailSenderLambdaVersionType_Values returns all elements of the CustomEmailSenderLambdaVersionType enum +func CustomEmailSenderLambdaVersionType_Values() []string { + return []string{ + CustomEmailSenderLambdaVersionTypeV10, + } +} + +const ( + // CustomSMSSenderLambdaVersionTypeV10 is a CustomSMSSenderLambdaVersionType enum value + CustomSMSSenderLambdaVersionTypeV10 = "V1_0" +) + +// CustomSMSSenderLambdaVersionType_Values returns all elements of the CustomSMSSenderLambdaVersionType enum +func CustomSMSSenderLambdaVersionType_Values() []string { + return []string{ + CustomSMSSenderLambdaVersionTypeV10, + } +} + const ( // DefaultEmailOptionTypeConfirmWithLink is a DefaultEmailOptionType enum value DefaultEmailOptionTypeConfirmWithLink = "CONFIRM_WITH_LINK" diff --git a/service/comprehend/api.go b/service/comprehend/api.go index f76ee8f111e..8de97801eca 100644 --- a/service/comprehend/api.go +++ b/service/comprehend/api.go @@ -1756,6 +1756,94 @@ func (c *Comprehend) DescribeEntityRecognizerWithContext(ctx aws.Context, input return out, req.Send() } +const opDescribeEventsDetectionJob = "DescribeEventsDetectionJob" + +// DescribeEventsDetectionJobRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEventsDetectionJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeEventsDetectionJob for more information on using the DescribeEventsDetectionJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeEventsDetectionJobRequest method. +// req, resp := client.DescribeEventsDetectionJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/DescribeEventsDetectionJob +func (c *Comprehend) DescribeEventsDetectionJobRequest(input *DescribeEventsDetectionJobInput) (req *request.Request, output *DescribeEventsDetectionJobOutput) { + op := &request.Operation{ + Name: opDescribeEventsDetectionJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeEventsDetectionJobInput{} + } + + output = &DescribeEventsDetectionJobOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeEventsDetectionJob API operation for Amazon Comprehend. +// +// Gets the status and details of an events detection job. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Comprehend's +// API operation DescribeEventsDetectionJob for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// The request is invalid. 
+// +// * JobNotFoundException +// The specified job was not found. Check the job ID and try again. +// +// * TooManyRequestsException +// The number of requests exceeds the limit. Resubmit your request later. +// +// * InternalServerException +// An internal server error occurred. Retry your request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/DescribeEventsDetectionJob +func (c *Comprehend) DescribeEventsDetectionJob(input *DescribeEventsDetectionJobInput) (*DescribeEventsDetectionJobOutput, error) { + req, out := c.DescribeEventsDetectionJobRequest(input) + return out, req.Send() +} + +// DescribeEventsDetectionJobWithContext is the same as DescribeEventsDetectionJob with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeEventsDetectionJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Comprehend) DescribeEventsDetectionJobWithContext(ctx aws.Context, input *DescribeEventsDetectionJobInput, opts ...request.Option) (*DescribeEventsDetectionJobOutput, error) { + req, out := c.DescribeEventsDetectionJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeKeyPhrasesDetectionJob = "DescribeKeyPhrasesDetectionJob" // DescribeKeyPhrasesDetectionJobRequest generates a "aws/request.Request" representing the @@ -3489,6 +3577,152 @@ func (c *Comprehend) ListEntityRecognizersPagesWithContext(ctx aws.Context, inpu return p.Err() } +const opListEventsDetectionJobs = "ListEventsDetectionJobs" + +// ListEventsDetectionJobsRequest generates a "aws/request.Request" representing the +// client's request for the ListEventsDetectionJobs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListEventsDetectionJobs for more information on using the ListEventsDetectionJobs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListEventsDetectionJobsRequest method. 
+// req, resp := client.ListEventsDetectionJobsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/ListEventsDetectionJobs +func (c *Comprehend) ListEventsDetectionJobsRequest(input *ListEventsDetectionJobsInput) (req *request.Request, output *ListEventsDetectionJobsOutput) { + op := &request.Operation{ + Name: opListEventsDetectionJobs, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListEventsDetectionJobsInput{} + } + + output = &ListEventsDetectionJobsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListEventsDetectionJobs API operation for Amazon Comprehend. +// +// Gets a list of the events detection jobs that you have submitted. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Comprehend's +// API operation ListEventsDetectionJobs for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// The request is invalid. +// +// * TooManyRequestsException +// The number of requests exceeds the limit. Resubmit your request later. +// +// * InvalidFilterException +// The filter specified for the operation is invalid. Specify a different filter. +// +// * InternalServerException +// An internal server error occurred. Retry your request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/ListEventsDetectionJobs +func (c *Comprehend) ListEventsDetectionJobs(input *ListEventsDetectionJobsInput) (*ListEventsDetectionJobsOutput, error) { + req, out := c.ListEventsDetectionJobsRequest(input) + return out, req.Send() +} + +// ListEventsDetectionJobsWithContext is the same as ListEventsDetectionJobs with the addition of +// the ability to pass a context and additional request options. +// +// See ListEventsDetectionJobs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Comprehend) ListEventsDetectionJobsWithContext(ctx aws.Context, input *ListEventsDetectionJobsInput, opts ...request.Option) (*ListEventsDetectionJobsOutput, error) { + req, out := c.ListEventsDetectionJobsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListEventsDetectionJobsPages iterates over the pages of a ListEventsDetectionJobs operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListEventsDetectionJobs method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListEventsDetectionJobs operation. 
+// pageNum := 0 +// err := client.ListEventsDetectionJobsPages(params, +// func(page *comprehend.ListEventsDetectionJobsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Comprehend) ListEventsDetectionJobsPages(input *ListEventsDetectionJobsInput, fn func(*ListEventsDetectionJobsOutput, bool) bool) error { + return c.ListEventsDetectionJobsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListEventsDetectionJobsPagesWithContext same as ListEventsDetectionJobsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Comprehend) ListEventsDetectionJobsPagesWithContext(ctx aws.Context, input *ListEventsDetectionJobsInput, fn func(*ListEventsDetectionJobsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListEventsDetectionJobsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListEventsDetectionJobsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListEventsDetectionJobsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListKeyPhrasesDetectionJobs = "ListKeyPhrasesDetectionJobs" // ListKeyPhrasesDetectionJobsRequest generates a "aws/request.Request" representing the @@ -4392,6 +4626,95 @@ func (c *Comprehend) StartEntitiesDetectionJobWithContext(ctx aws.Context, input return out, req.Send() } +const opStartEventsDetectionJob = "StartEventsDetectionJob" + +// StartEventsDetectionJobRequest generates a "aws/request.Request" representing the +// client's request for the StartEventsDetectionJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartEventsDetectionJob for more information on using the StartEventsDetectionJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartEventsDetectionJobRequest method. +// req, resp := client.StartEventsDetectionJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/StartEventsDetectionJob +func (c *Comprehend) StartEventsDetectionJobRequest(input *StartEventsDetectionJobInput) (req *request.Request, output *StartEventsDetectionJobOutput) { + op := &request.Operation{ + Name: opStartEventsDetectionJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartEventsDetectionJobInput{} + } + + output = &StartEventsDetectionJobOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartEventsDetectionJob API operation for Amazon Comprehend. +// +// Starts an asynchronous event detection job for a collection of documents. 
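// (Editorial sketch, not generated documentation.) A minimal start-job call,
// assuming the input mirrors the other Start*DetectionJob operations and adds
// TargetEventTypes; the role ARN and bucket URIs below are hypothetical:
//
//    out, err := svc.StartEventsDetectionJob(&comprehend.StartEventsDetectionJobInput{
//        DataAccessRoleArn: aws.String("arn:aws:iam::123456789012:role/ComprehendDataAccess"),
//        LanguageCode:      aws.String("en"),
//        TargetEventTypes:  []*string{aws.String("BANKRUPTCY")},
//        InputDataConfig: &comprehend.InputDataConfig{
//            S3Uri: aws.String("s3://example-input-bucket/docs/"),
//        },
//        OutputDataConfig: &comprehend.OutputDataConfig{
//            S3Uri: aws.String("s3://example-output-bucket/results/"),
//        },
//    })
//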
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Comprehend's +// API operation StartEventsDetectionJob for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// The request is invalid. +// +// * TooManyRequestsException +// The number of requests exceeds the limit. Resubmit your request later. +// +// * KmsKeyValidationException +// The KMS customer managed key (CMK) entered cannot be validated. Verify the +// key and re-enter it. +// +// * InternalServerException +// An internal server error occurred. Retry your request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/StartEventsDetectionJob +func (c *Comprehend) StartEventsDetectionJob(input *StartEventsDetectionJobInput) (*StartEventsDetectionJobOutput, error) { + req, out := c.StartEventsDetectionJobRequest(input) + return out, req.Send() +} + +// StartEventsDetectionJobWithContext is the same as StartEventsDetectionJob with the addition of +// the ability to pass a context and additional request options. +// +// See StartEventsDetectionJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Comprehend) StartEventsDetectionJobWithContext(ctx aws.Context, input *StartEventsDetectionJobInput, opts ...request.Option) (*StartEventsDetectionJobOutput, error) { + req, out := c.StartEventsDetectionJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opStartKeyPhrasesDetectionJob = "StartKeyPhrasesDetectionJob" // StartKeyPhrasesDetectionJobRequest generates a "aws/request.Request" representing the @@ -4943,33 +5266,118 @@ func (c *Comprehend) StopEntitiesDetectionJobWithContext(ctx aws.Context, input return out, req.Send() } -const opStopKeyPhrasesDetectionJob = "StopKeyPhrasesDetectionJob" +const opStopEventsDetectionJob = "StopEventsDetectionJob" -// StopKeyPhrasesDetectionJobRequest generates a "aws/request.Request" representing the -// client's request for the StopKeyPhrasesDetectionJob operation. The "output" return +// StopEventsDetectionJobRequest generates a "aws/request.Request" representing the +// client's request for the StopEventsDetectionJob operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See StopKeyPhrasesDetectionJob for more information on using the StopKeyPhrasesDetectionJob +// See StopEventsDetectionJob for more information on using the StopEventsDetectionJob // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the StopKeyPhrasesDetectionJobRequest method. -// req, resp := client.StopKeyPhrasesDetectionJobRequest(params) +// // Example sending a request using the StopEventsDetectionJobRequest method. 
+// req, resp := client.StopEventsDetectionJobRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/StopKeyPhrasesDetectionJob -func (c *Comprehend) StopKeyPhrasesDetectionJobRequest(input *StopKeyPhrasesDetectionJobInput) (req *request.Request, output *StopKeyPhrasesDetectionJobOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/StopEventsDetectionJob +func (c *Comprehend) StopEventsDetectionJobRequest(input *StopEventsDetectionJobInput) (req *request.Request, output *StopEventsDetectionJobOutput) { + op := &request.Operation{ + Name: opStopEventsDetectionJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopEventsDetectionJobInput{} + } + + output = &StopEventsDetectionJobOutput{} + req = c.newRequest(op, input, output) + return +} + +// StopEventsDetectionJob API operation for Amazon Comprehend. +// +// Stops an events detection job in progress. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Comprehend's +// API operation StopEventsDetectionJob for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// The request is invalid. +// +// * JobNotFoundException +// The specified job was not found. Check the job ID and try again. +// +// * InternalServerException +// An internal server error occurred. Retry your request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/StopEventsDetectionJob +func (c *Comprehend) StopEventsDetectionJob(input *StopEventsDetectionJobInput) (*StopEventsDetectionJobOutput, error) { + req, out := c.StopEventsDetectionJobRequest(input) + return out, req.Send() +} + +// StopEventsDetectionJobWithContext is the same as StopEventsDetectionJob with the addition of +// the ability to pass a context and additional request options. +// +// See StopEventsDetectionJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Comprehend) StopEventsDetectionJobWithContext(ctx aws.Context, input *StopEventsDetectionJobInput, opts ...request.Option) (*StopEventsDetectionJobOutput, error) { + req, out := c.StopEventsDetectionJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStopKeyPhrasesDetectionJob = "StopKeyPhrasesDetectionJob" + +// StopKeyPhrasesDetectionJobRequest generates a "aws/request.Request" representing the +// client's request for the StopKeyPhrasesDetectionJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StopKeyPhrasesDetectionJob for more information on using the StopKeyPhrasesDetectionJob +// API call, and error handling. 
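As the doc comments above suggest, service failures can be narrowed with a runtime type assertion on awserr.Error. A hedged sketch of stopping an events detection job while treating a missing job as a no-op; ErrCodeJobNotFoundException is the package's generated error-code constant for the JobNotFoundException listed above, and the job ID is a caller-supplied placeholder:

```go
package jobs

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/comprehend"
)

// StopJob stops an events detection job, treating "job not found" as a no-op.
func StopJob(client *comprehend.Comprehend, jobID string) error {
	out, err := client.StopEventsDetectionJob(&comprehend.StopEventsDetectionJobInput{
		JobId: aws.String(jobID),
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok &&
			aerr.Code() == comprehend.ErrCodeJobNotFoundException {
			return nil // no job with this ID; nothing to stop
		}
		return err
	}
	// Stopping is asynchronous; JobStatus reports the requested transition.
	fmt.Println("status:", aws.StringValue(out.JobStatus))
	return nil
}
```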
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StopKeyPhrasesDetectionJobRequest method. +// req, resp := client.StopKeyPhrasesDetectionJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/StopKeyPhrasesDetectionJob +func (c *Comprehend) StopKeyPhrasesDetectionJobRequest(input *StopKeyPhrasesDetectionJobInput) (req *request.Request, output *StopKeyPhrasesDetectionJobOutput) { op := &request.Operation{ Name: opStopKeyPhrasesDetectionJob, HTTPMethod: "POST", @@ -6757,7 +7165,7 @@ type ClassifyDocumentOutput struct { // The labels used the document being analyzed. These are used for multi-label // trained models. Individual labels represent different categories that are - // related in some manner and are not multually exclusive. For example, a movie + // related in some manner and are not mutually exclusive. For example, a movie // can be just an action movie, or it can be an action movie, a science fiction // movie, and a comedy, all at the same time. Labels []*DocumentLabel `type:"list"` @@ -7901,6 +8309,71 @@ func (s *DescribeEntityRecognizerOutput) SetEntityRecognizerProperties(v *Entity return s } +type DescribeEventsDetectionJobInput struct { + _ struct{} `type:"structure"` + + // The identifier of the events detection job. + // + // JobId is a required field + JobId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeEventsDetectionJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventsDetectionJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeEventsDetectionJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeEventsDetectionJobInput"} + if s.JobId == nil { + invalidParams.Add(request.NewErrParamRequired("JobId")) + } + if s.JobId != nil && len(*s.JobId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetJobId sets the JobId field's value. +func (s *DescribeEventsDetectionJobInput) SetJobId(v string) *DescribeEventsDetectionJobInput { + s.JobId = &v + return s +} + +type DescribeEventsDetectionJobOutput struct { + _ struct{} `type:"structure"` + + // An object that contains the properties associated with an event detection + // job. + EventsDetectionJobProperties *EventsDetectionJobProperties `type:"structure"` +} + +// String returns the string representation +func (s DescribeEventsDetectionJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEventsDetectionJobOutput) GoString() string { + return s.String() +} + +// SetEventsDetectionJobProperties sets the EventsDetectionJobProperties field's value. 
+func (s *DescribeEventsDetectionJobOutput) SetEventsDetectionJobProperties(v *EventsDetectionJobProperties) *DescribeEventsDetectionJobOutput { + s.EventsDetectionJobProperties = v + return s +} + type DescribeKeyPhrasesDetectionJobInput struct { _ struct{} `type:"structure"` @@ -9555,7 +10028,7 @@ func (s *DominantLanguageDetectionJobProperties) SetVpcConfig(v *VpcConfig) *Dom return s } -// The filter used to determine which endpoints are are returned. You can filter +// The filter used to determine which endpoints are returned. You can filter // jobs on their name, model, status, or the date and time that they were created. // You can only set one filter at a time. type EndpointFilter struct { @@ -10599,7 +11072,7 @@ func (s *EntityRecognizerProperties) SetVpcConfig(v *VpcConfig) *EntityRecognize type EntityTypesEvaluationMetrics struct { _ struct{} `type:"structure"` - // A measure of how accurate the recognizer results are for for a specific entity + // A measure of how accurate the recognizer results are for a specific entity // type in the test data. It is derived from the Precision and Recall values. // The F1Score is the harmonic average of the two scores. The highest score // is 1, and the worst score is 0. @@ -10689,48 +11162,43 @@ func (s *EntityTypesListItem) SetType(v string) *EntityTypesListItem { return s } -// The input properties for a topic detection job. -type InputDataConfig struct { +// Provides information for filtering a list of event detection jobs. +type EventsDetectionJobFilter struct { _ struct{} `type:"structure"` - // Specifies how the text in an input file should be processed: - // - // * ONE_DOC_PER_FILE - Each file is considered a separate document. Use - // this option when you are processing large documents, such as newspaper - // articles or scientific papers. - // - // * ONE_DOC_PER_LINE - Each line in a file is considered a separate document. - // Use this option when you are processing many short documents, such as - // text messages. - InputFormat *string `type:"string" enum:"InputFormat"` + // Filters on the name of the events detection job. + JobName *string `min:"1" type:"string"` - // The Amazon S3 URI for the input data. The URI must be in same region as the - // API endpoint that you are calling. The URI can point to a single input file - // or it can provide the prefix for a collection of data files. - // - // For example, if you use the URI S3://bucketName/prefix, if the prefix is - // a single file, Amazon Comprehend uses that file as input. If more than one - // file begins with the prefix, Amazon Comprehend uses all of them as input. - // - // S3Uri is a required field - S3Uri *string `type:"string" required:"true"` + // Filters the list of jobs based on job status. Returns only jobs with the + // specified status. + JobStatus *string `type:"string" enum:"JobStatus"` + + // Filters the list of jobs based on the time that the job was submitted for + // processing. Returns only jobs submitted after the specified time. Jobs are + // returned in descending order, newest to oldest. + SubmitTimeAfter *time.Time `type:"timestamp"` + + // Filters the list of jobs based on the time that the job was submitted for + // processing. Returns only jobs submitted before the specified time. Jobs are + // returned in ascending order, oldest to newest. 
+ SubmitTimeBefore *time.Time `type:"timestamp"` } // String returns the string representation -func (s InputDataConfig) String() string { +func (s EventsDetectionJobFilter) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s InputDataConfig) GoString() string { +func (s EventsDetectionJobFilter) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *InputDataConfig) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "InputDataConfig"} - if s.S3Uri == nil { - invalidParams.Add(request.NewErrParamRequired("S3Uri")) +func (s *EventsDetectionJobFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EventsDetectionJobFilter"} + if s.JobName != nil && len(*s.JobName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) } if invalidParams.Len() > 0 { @@ -10739,98 +11207,289 @@ func (s *InputDataConfig) Validate() error { return nil } -// SetInputFormat sets the InputFormat field's value. -func (s *InputDataConfig) SetInputFormat(v string) *InputDataConfig { - s.InputFormat = &v +// SetJobName sets the JobName field's value. +func (s *EventsDetectionJobFilter) SetJobName(v string) *EventsDetectionJobFilter { + s.JobName = &v return s } -// SetS3Uri sets the S3Uri field's value. -func (s *InputDataConfig) SetS3Uri(v string) *InputDataConfig { - s.S3Uri = &v +// SetJobStatus sets the JobStatus field's value. +func (s *EventsDetectionJobFilter) SetJobStatus(v string) *EventsDetectionJobFilter { + s.JobStatus = &v return s } -// An internal server error occurred. Retry your request. -type InternalServerException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"Message" min:"1" type:"string"` +// SetSubmitTimeAfter sets the SubmitTimeAfter field's value. +func (s *EventsDetectionJobFilter) SetSubmitTimeAfter(v time.Time) *EventsDetectionJobFilter { + s.SubmitTimeAfter = &v + return s } -// String returns the string representation -func (s InternalServerException) String() string { - return awsutil.Prettify(s) +// SetSubmitTimeBefore sets the SubmitTimeBefore field's value. +func (s *EventsDetectionJobFilter) SetSubmitTimeBefore(v time.Time) *EventsDetectionJobFilter { + s.SubmitTimeBefore = &v + return s } -// GoString returns the string representation -func (s InternalServerException) GoString() string { - return s.String() -} +// Provides information about an events detection job. +type EventsDetectionJobProperties struct { + _ struct{} `type:"structure"` -func newErrorInternalServerException(v protocol.ResponseMetadata) error { - return &InternalServerException{ - RespMetadata: v, - } -} + // The Amazon Resource Name (ARN) of the AWS Identify and Access Management + // (IAM) role that grants Amazon Comprehend read access to your input data. + DataAccessRoleArn *string `min:"20" type:"string"` -// Code returns the exception type name. -func (s *InternalServerException) Code() string { - return "InternalServerException" -} + // The time that the events detection job completed. + EndTime *time.Time `type:"timestamp"` -// Message returns the exception's message. -func (s *InternalServerException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} + // The input data configuration that you supplied when you created the events + // detection job. 
+ InputDataConfig *InputDataConfig `type:"structure"` -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InternalServerException) OrigErr() error { - return nil -} + // The identifier assigned to the events detection job. + JobId *string `min:"1" type:"string"` -func (s *InternalServerException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} + // The name you assigned the events detection job. + JobName *string `min:"1" type:"string"` -// Status code returns the HTTP status code for the request's response error. -func (s *InternalServerException) StatusCode() int { - return s.RespMetadata.StatusCode -} + // The current status of the events detection job. + JobStatus *string `type:"string" enum:"JobStatus"` -// RequestID returns the service's response RequestID for request. -func (s *InternalServerException) RequestID() string { - return s.RespMetadata.RequestID -} + // The language code of the input documents. + LanguageCode *string `type:"string" enum:"LanguageCode"` -// The filter specified for the operation is invalid. Specify a different filter. -type InvalidFilterException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + // A description of the status of the events detection job. + Message *string `type:"string"` - Message_ *string `locationName:"Message" min:"1" type:"string"` + // The output data configuration that you supplied when you created the events + // detection job. + OutputDataConfig *OutputDataConfig `type:"structure"` + + // The time that the events detection job was submitted for processing. + SubmitTime *time.Time `type:"timestamp"` + + // The types of events that are detected by the job. + TargetEventTypes []*string `min:"1" type:"list"` } // String returns the string representation -func (s InvalidFilterException) String() string { +func (s EventsDetectionJobProperties) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s InvalidFilterException) GoString() string { +func (s EventsDetectionJobProperties) GoString() string { return s.String() } -func newErrorInvalidFilterException(v protocol.ResponseMetadata) error { - return &InvalidFilterException{ - RespMetadata: v, - } -} - +// SetDataAccessRoleArn sets the DataAccessRoleArn field's value. +func (s *EventsDetectionJobProperties) SetDataAccessRoleArn(v string) *EventsDetectionJobProperties { + s.DataAccessRoleArn = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *EventsDetectionJobProperties) SetEndTime(v time.Time) *EventsDetectionJobProperties { + s.EndTime = &v + return s +} + +// SetInputDataConfig sets the InputDataConfig field's value. +func (s *EventsDetectionJobProperties) SetInputDataConfig(v *InputDataConfig) *EventsDetectionJobProperties { + s.InputDataConfig = v + return s +} + +// SetJobId sets the JobId field's value. +func (s *EventsDetectionJobProperties) SetJobId(v string) *EventsDetectionJobProperties { + s.JobId = &v + return s +} + +// SetJobName sets the JobName field's value. +func (s *EventsDetectionJobProperties) SetJobName(v string) *EventsDetectionJobProperties { + s.JobName = &v + return s +} + +// SetJobStatus sets the JobStatus field's value. +func (s *EventsDetectionJobProperties) SetJobStatus(v string) *EventsDetectionJobProperties { + s.JobStatus = &v + return s +} + +// SetLanguageCode sets the LanguageCode field's value. 
+func (s *EventsDetectionJobProperties) SetLanguageCode(v string) *EventsDetectionJobProperties { + s.LanguageCode = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *EventsDetectionJobProperties) SetMessage(v string) *EventsDetectionJobProperties { + s.Message = &v + return s +} + +// SetOutputDataConfig sets the OutputDataConfig field's value. +func (s *EventsDetectionJobProperties) SetOutputDataConfig(v *OutputDataConfig) *EventsDetectionJobProperties { + s.OutputDataConfig = v + return s +} + +// SetSubmitTime sets the SubmitTime field's value. +func (s *EventsDetectionJobProperties) SetSubmitTime(v time.Time) *EventsDetectionJobProperties { + s.SubmitTime = &v + return s +} + +// SetTargetEventTypes sets the TargetEventTypes field's value. +func (s *EventsDetectionJobProperties) SetTargetEventTypes(v []*string) *EventsDetectionJobProperties { + s.TargetEventTypes = v + return s +} + +// The input properties for a topic detection job. +type InputDataConfig struct { + _ struct{} `type:"structure"` + + // Specifies how the text in an input file should be processed: + // + // * ONE_DOC_PER_FILE - Each file is considered a separate document. Use + // this option when you are processing large documents, such as newspaper + // articles or scientific papers. + // + // * ONE_DOC_PER_LINE - Each line in a file is considered a separate document. + // Use this option when you are processing many short documents, such as + // text messages. + InputFormat *string `type:"string" enum:"InputFormat"` + + // The Amazon S3 URI for the input data. The URI must be in the same region as the + // API endpoint that you are calling. The URI can point to a single input file + // or it can provide the prefix for a collection of data files. + // + // For example, if you use the URI S3://bucketName/prefix and the prefix is + // a single file, Amazon Comprehend uses that file as input. If more than one + // file begins with the prefix, Amazon Comprehend uses all of them as input. + // + // S3Uri is a required field + S3Uri *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s InputDataConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InputDataConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InputDataConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InputDataConfig"} + if s.S3Uri == nil { + invalidParams.Add(request.NewErrParamRequired("S3Uri")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInputFormat sets the InputFormat field's value. +func (s *InputDataConfig) SetInputFormat(v string) *InputDataConfig { + s.InputFormat = &v + return s +} + +// SetS3Uri sets the S3Uri field's value. +func (s *InputDataConfig) SetS3Uri(v string) *InputDataConfig { + s.S3Uri = &v + return s +} + +// An internal server error occurred. Retry your request.
+type InternalServerException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" min:"1" type:"string"` +} + +// String returns the string representation +func (s InternalServerException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InternalServerException) GoString() string { + return s.String() +} + +func newErrorInternalServerException(v protocol.ResponseMetadata) error { + return &InternalServerException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InternalServerException) Code() string { + return "InternalServerException" +} + +// Message returns the exception's message. +func (s *InternalServerException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InternalServerException) OrigErr() error { + return nil +} + +func (s *InternalServerException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InternalServerException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InternalServerException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The filter specified for the operation is invalid. Specify a different filter. +type InvalidFilterException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" min:"1" type:"string"` +} + +// String returns the string representation +func (s InvalidFilterException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidFilterException) GoString() string { + return s.String() +} + +func newErrorInvalidFilterException(v protocol.ResponseMetadata) error { + return &InvalidFilterException{ + RespMetadata: v, + } +} + // Code returns the exception type name. func (s *InvalidFilterException) Code() string { return "InvalidFilterException" @@ -11858,6 +12517,102 @@ func (s *ListEntityRecognizersOutput) SetNextToken(v string) *ListEntityRecogniz return s } +type ListEventsDetectionJobsInput struct { + _ struct{} `type:"structure"` + + // Filters the jobs that are returned. You can filter jobs on their name, status, + // or the date and time that they were submitted. You can only set one filter + // at a time. + Filter *EventsDetectionJobFilter `type:"structure"` + + // The maximum number of results to return in each page. + MaxResults *int64 `min:"1" type:"integer"` + + // Identifies the next page of results to return. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListEventsDetectionJobsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListEventsDetectionJobsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
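Because the exception types above are concrete Go types, Go 1.13 error matching with errors.As is an alternative to the awserr assertion style. A small sketch of classifying retryable failures, assuming only the comprehend package's generated types:

```go
package jobs

import (
	"errors"

	"github.com/aws/aws-sdk-go/service/comprehend"
)

// IsRetryable reports whether err is one of the transient Comprehend errors
// documented above (internal error or throttling), using Go 1.13 error matching.
func IsRetryable(err error) bool {
	var ise *comprehend.InternalServerException
	var tmr *comprehend.TooManyRequestsException
	return errors.As(err, &ise) || errors.As(err, &tmr)
}
```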
+func (s *ListEventsDetectionJobsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListEventsDetectionJobsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *ListEventsDetectionJobsInput) SetFilter(v *EventsDetectionJobFilter) *ListEventsDetectionJobsInput { + s.Filter = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListEventsDetectionJobsInput) SetMaxResults(v int64) *ListEventsDetectionJobsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListEventsDetectionJobsInput) SetNextToken(v string) *ListEventsDetectionJobsInput { + s.NextToken = &v + return s +} + +type ListEventsDetectionJobsOutput struct { + _ struct{} `type:"structure"` + + // A list containing the properties of each job that is returned. + EventsDetectionJobPropertiesList []*EventsDetectionJobProperties `type:"list"` + + // Identifies the next page of results to return. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListEventsDetectionJobsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListEventsDetectionJobsOutput) GoString() string { + return s.String() +} + +// SetEventsDetectionJobPropertiesList sets the EventsDetectionJobPropertiesList field's value. +func (s *ListEventsDetectionJobsOutput) SetEventsDetectionJobPropertiesList(v []*EventsDetectionJobProperties) *ListEventsDetectionJobsOutput { + s.EventsDetectionJobPropertiesList = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListEventsDetectionJobsOutput) SetNextToken(v string) *ListEventsDetectionJobsOutput { + s.NextToken = &v + return s +} + type ListKeyPhrasesDetectionJobsInput struct { _ struct{} `type:"structure"` @@ -13856,6 +14611,175 @@ func (s *StartEntitiesDetectionJobOutput) SetJobStatus(v string) *StartEntitiesD return s } +type StartEventsDetectionJobInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the request. If you don't set the client request + // token, Amazon Comprehend generates one. + ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` + + // The Amazon Resource Name (ARN) of the AWS Identity and Access Management + // (IAM) role that grants Amazon Comprehend read access to your input data. + // + // DataAccessRoleArn is a required field + DataAccessRoleArn *string `min:"20" type:"string" required:"true"` + + // Specifies the format and location of the input data for the job. + // + // InputDataConfig is a required field + InputDataConfig *InputDataConfig `type:"structure" required:"true"` + + // The name of the events detection job. + JobName *string `min:"1" type:"string"` + + // The language code of the input documents. + // + // LanguageCode is a required field + LanguageCode *string `type:"string" required:"true" enum:"LanguageCode"` + + // Specifies where to send the output files.
+ // + // OutputDataConfig is a required field + OutputDataConfig *OutputDataConfig `type:"structure" required:"true"` + + // The types of events to detect in the input documents. + // + // TargetEventTypes is a required field + TargetEventTypes []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s StartEventsDetectionJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartEventsDetectionJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartEventsDetectionJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartEventsDetectionJobInput"} + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) + } + if s.DataAccessRoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("DataAccessRoleArn")) + } + if s.DataAccessRoleArn != nil && len(*s.DataAccessRoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("DataAccessRoleArn", 20)) + } + if s.InputDataConfig == nil { + invalidParams.Add(request.NewErrParamRequired("InputDataConfig")) + } + if s.JobName != nil && len(*s.JobName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobName", 1)) + } + if s.LanguageCode == nil { + invalidParams.Add(request.NewErrParamRequired("LanguageCode")) + } + if s.OutputDataConfig == nil { + invalidParams.Add(request.NewErrParamRequired("OutputDataConfig")) + } + if s.TargetEventTypes == nil { + invalidParams.Add(request.NewErrParamRequired("TargetEventTypes")) + } + if s.TargetEventTypes != nil && len(s.TargetEventTypes) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TargetEventTypes", 1)) + } + if s.InputDataConfig != nil { + if err := s.InputDataConfig.Validate(); err != nil { + invalidParams.AddNested("InputDataConfig", err.(request.ErrInvalidParams)) + } + } + if s.OutputDataConfig != nil { + if err := s.OutputDataConfig.Validate(); err != nil { + invalidParams.AddNested("OutputDataConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *StartEventsDetectionJobInput) SetClientRequestToken(v string) *StartEventsDetectionJobInput { + s.ClientRequestToken = &v + return s +} + +// SetDataAccessRoleArn sets the DataAccessRoleArn field's value. +func (s *StartEventsDetectionJobInput) SetDataAccessRoleArn(v string) *StartEventsDetectionJobInput { + s.DataAccessRoleArn = &v + return s +} + +// SetInputDataConfig sets the InputDataConfig field's value. +func (s *StartEventsDetectionJobInput) SetInputDataConfig(v *InputDataConfig) *StartEventsDetectionJobInput { + s.InputDataConfig = v + return s +} + +// SetJobName sets the JobName field's value. +func (s *StartEventsDetectionJobInput) SetJobName(v string) *StartEventsDetectionJobInput { + s.JobName = &v + return s +} + +// SetLanguageCode sets the LanguageCode field's value. +func (s *StartEventsDetectionJobInput) SetLanguageCode(v string) *StartEventsDetectionJobInput { + s.LanguageCode = &v + return s +} + +// SetOutputDataConfig sets the OutputDataConfig field's value. 
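The Validate method above runs automatically before a request is sent, so every required field is checked client-side. A sketch that builds a StartEventsDetectionJobInput, validates it explicitly, and submits it under a context deadline; the role ARN, bucket URIs, and target event types are placeholder assumptions rather than real resources:

```go
package jobs

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/comprehend"
)

// StartJob submits an events detection job and returns its ID.
func StartJob(client *comprehend.Comprehend) (string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	input := &comprehend.StartEventsDetectionJobInput{
		JobName:           aws.String("example-events-job"),
		DataAccessRoleArn: aws.String("arn:aws:iam::123456789012:role/ComprehendDataAccess"),
		LanguageCode:      aws.String(comprehend.LanguageCodeEn),
		InputDataConfig: &comprehend.InputDataConfig{
			S3Uri:       aws.String("s3://example-input-bucket/docs/"),
			InputFormat: aws.String(comprehend.InputFormatOneDocPerLine),
		},
		OutputDataConfig: &comprehend.OutputDataConfig{
			S3Uri: aws.String("s3://example-output-bucket/events/"),
		},
		// Illustrative event types; see the service documentation for the full list.
		TargetEventTypes: aws.StringSlice([]string{"BANKRUPTCY", "EMPLOYMENT"}),
	}
	// Validate also runs inside the SDK before the request is sent; calling it
	// here surfaces missing required fields without a network round trip.
	if err := input.Validate(); err != nil {
		return "", err
	}

	out, err := client.StartEventsDetectionJobWithContext(ctx, input)
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.JobId), nil
}
```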
+func (s *StartEventsDetectionJobInput) SetOutputDataConfig(v *OutputDataConfig) *StartEventsDetectionJobInput { + s.OutputDataConfig = v + return s +} + +// SetTargetEventTypes sets the TargetEventTypes field's value. +func (s *StartEventsDetectionJobInput) SetTargetEventTypes(v []*string) *StartEventsDetectionJobInput { + s.TargetEventTypes = v + return s +} + +type StartEventsDetectionJobOutput struct { + _ struct{} `type:"structure"` + + // The identifier generated for the job. To get the status of the job, use + // this identifier with the DescribeEventsDetectionJob operation. + JobId *string `min:"1" type:"string"` + + // The status of the events detection job. + JobStatus *string `type:"string" enum:"JobStatus"` +} + +// String returns the string representation +func (s StartEventsDetectionJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartEventsDetectionJobOutput) GoString() string { + return s.String() +} + +// SetJobId sets the JobId field's value. +func (s *StartEventsDetectionJobOutput) SetJobId(v string) *StartEventsDetectionJobOutput { + s.JobId = &v + return s +} + +// SetJobStatus sets the JobStatus field's value. +func (s *StartEventsDetectionJobOutput) SetJobStatus(v string) *StartEventsDetectionJobOutput { + s.JobStatus = &v + return s +} + type StartKeyPhrasesDetectionJobInput struct { _ struct{} `type:"structure"` @@ -14774,6 +15698,79 @@ func (s *StopEntitiesDetectionJobOutput) SetJobStatus(v string) *StopEntitiesDet return s } +type StopEventsDetectionJobInput struct { + _ struct{} `type:"structure"` + + // The identifier of the events detection job to stop. + // + // JobId is a required field + JobId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s StopEventsDetectionJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopEventsDetectionJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StopEventsDetectionJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopEventsDetectionJobInput"} + if s.JobId == nil { + invalidParams.Add(request.NewErrParamRequired("JobId")) + } + if s.JobId != nil && len(*s.JobId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetJobId sets the JobId field's value. +func (s *StopEventsDetectionJobInput) SetJobId(v string) *StopEventsDetectionJobInput { + s.JobId = &v + return s +} + +type StopEventsDetectionJobOutput struct { + _ struct{} `type:"structure"` + + // The identifier of the events detection job to stop. + JobId *string `min:"1" type:"string"` + + // The status of the events detection job. + JobStatus *string `type:"string" enum:"JobStatus"` +} + +// String returns the string representation +func (s StopEventsDetectionJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopEventsDetectionJobOutput) GoString() string { + return s.String() +} + +// SetJobId sets the JobId field's value. +func (s *StopEventsDetectionJobOutput) SetJobId(v string) *StopEventsDetectionJobOutput { + s.JobId = &v + return s +} + +// SetJobStatus sets the JobStatus field's value.
+func (s *StopEventsDetectionJobOutput) SetJobStatus(v string) *StopEventsDetectionJobOutput { + s.JobStatus = &v + return s +} + type StopKeyPhrasesDetectionJobInput struct { _ struct{} `type:"structure"` @@ -15939,7 +16936,7 @@ func (s UpdateEndpointOutput) GoString() string { } // Configuration parameters for an optional private Virtual Private Cloud (VPC) -// containing the resources you are using for the job. For For more information, +// containing the resources you are using for the job. For more information, // see Amazon VPC (https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html). type VpcConfig struct { _ struct{} `type:"structure"` diff --git a/service/comprehend/comprehendiface/interface.go b/service/comprehend/comprehendiface/interface.go index 50799bba75e..edc28d84377 100644 --- a/service/comprehend/comprehendiface/interface.go +++ b/service/comprehend/comprehendiface/interface.go @@ -132,6 +132,10 @@ type ComprehendAPI interface { DescribeEntityRecognizerWithContext(aws.Context, *comprehend.DescribeEntityRecognizerInput, ...request.Option) (*comprehend.DescribeEntityRecognizerOutput, error) DescribeEntityRecognizerRequest(*comprehend.DescribeEntityRecognizerInput) (*request.Request, *comprehend.DescribeEntityRecognizerOutput) + DescribeEventsDetectionJob(*comprehend.DescribeEventsDetectionJobInput) (*comprehend.DescribeEventsDetectionJobOutput, error) + DescribeEventsDetectionJobWithContext(aws.Context, *comprehend.DescribeEventsDetectionJobInput, ...request.Option) (*comprehend.DescribeEventsDetectionJobOutput, error) + DescribeEventsDetectionJobRequest(*comprehend.DescribeEventsDetectionJobInput) (*request.Request, *comprehend.DescribeEventsDetectionJobOutput) + DescribeKeyPhrasesDetectionJob(*comprehend.DescribeKeyPhrasesDetectionJobInput) (*comprehend.DescribeKeyPhrasesDetectionJobOutput, error) DescribeKeyPhrasesDetectionJobWithContext(aws.Context, *comprehend.DescribeKeyPhrasesDetectionJobInput, ...request.Option) (*comprehend.DescribeKeyPhrasesDetectionJobOutput, error) DescribeKeyPhrasesDetectionJobRequest(*comprehend.DescribeKeyPhrasesDetectionJobInput) (*request.Request, *comprehend.DescribeKeyPhrasesDetectionJobOutput) @@ -211,6 +215,13 @@ type ComprehendAPI interface { ListEntityRecognizersPages(*comprehend.ListEntityRecognizersInput, func(*comprehend.ListEntityRecognizersOutput, bool) bool) error ListEntityRecognizersPagesWithContext(aws.Context, *comprehend.ListEntityRecognizersInput, func(*comprehend.ListEntityRecognizersOutput, bool) bool, ...request.Option) error + ListEventsDetectionJobs(*comprehend.ListEventsDetectionJobsInput) (*comprehend.ListEventsDetectionJobsOutput, error) + ListEventsDetectionJobsWithContext(aws.Context, *comprehend.ListEventsDetectionJobsInput, ...request.Option) (*comprehend.ListEventsDetectionJobsOutput, error) + ListEventsDetectionJobsRequest(*comprehend.ListEventsDetectionJobsInput) (*request.Request, *comprehend.ListEventsDetectionJobsOutput) + + ListEventsDetectionJobsPages(*comprehend.ListEventsDetectionJobsInput, func(*comprehend.ListEventsDetectionJobsOutput, bool) bool) error + ListEventsDetectionJobsPagesWithContext(aws.Context, *comprehend.ListEventsDetectionJobsInput, func(*comprehend.ListEventsDetectionJobsOutput, bool) bool, ...request.Option) error + ListKeyPhrasesDetectionJobs(*comprehend.ListKeyPhrasesDetectionJobsInput) (*comprehend.ListKeyPhrasesDetectionJobsOutput, error) ListKeyPhrasesDetectionJobsWithContext(aws.Context, *comprehend.ListKeyPhrasesDetectionJobsInput, ...request.Option) 
(*comprehend.ListKeyPhrasesDetectionJobsOutput, error) ListKeyPhrasesDetectionJobsRequest(*comprehend.ListKeyPhrasesDetectionJobsInput) (*request.Request, *comprehend.ListKeyPhrasesDetectionJobsOutput) @@ -252,6 +263,10 @@ type ComprehendAPI interface { StartEntitiesDetectionJobWithContext(aws.Context, *comprehend.StartEntitiesDetectionJobInput, ...request.Option) (*comprehend.StartEntitiesDetectionJobOutput, error) StartEntitiesDetectionJobRequest(*comprehend.StartEntitiesDetectionJobInput) (*request.Request, *comprehend.StartEntitiesDetectionJobOutput) + StartEventsDetectionJob(*comprehend.StartEventsDetectionJobInput) (*comprehend.StartEventsDetectionJobOutput, error) + StartEventsDetectionJobWithContext(aws.Context, *comprehend.StartEventsDetectionJobInput, ...request.Option) (*comprehend.StartEventsDetectionJobOutput, error) + StartEventsDetectionJobRequest(*comprehend.StartEventsDetectionJobInput) (*request.Request, *comprehend.StartEventsDetectionJobOutput) + StartKeyPhrasesDetectionJob(*comprehend.StartKeyPhrasesDetectionJobInput) (*comprehend.StartKeyPhrasesDetectionJobOutput, error) StartKeyPhrasesDetectionJobWithContext(aws.Context, *comprehend.StartKeyPhrasesDetectionJobInput, ...request.Option) (*comprehend.StartKeyPhrasesDetectionJobOutput, error) StartKeyPhrasesDetectionJobRequest(*comprehend.StartKeyPhrasesDetectionJobInput) (*request.Request, *comprehend.StartKeyPhrasesDetectionJobOutput) @@ -276,6 +291,10 @@ type ComprehendAPI interface { StopEntitiesDetectionJobWithContext(aws.Context, *comprehend.StopEntitiesDetectionJobInput, ...request.Option) (*comprehend.StopEntitiesDetectionJobOutput, error) StopEntitiesDetectionJobRequest(*comprehend.StopEntitiesDetectionJobInput) (*request.Request, *comprehend.StopEntitiesDetectionJobOutput) + StopEventsDetectionJob(*comprehend.StopEventsDetectionJobInput) (*comprehend.StopEventsDetectionJobOutput, error) + StopEventsDetectionJobWithContext(aws.Context, *comprehend.StopEventsDetectionJobInput, ...request.Option) (*comprehend.StopEventsDetectionJobOutput, error) + StopEventsDetectionJobRequest(*comprehend.StopEventsDetectionJobInput) (*request.Request, *comprehend.StopEventsDetectionJobOutput) + StopKeyPhrasesDetectionJob(*comprehend.StopKeyPhrasesDetectionJobInput) (*comprehend.StopKeyPhrasesDetectionJobOutput, error) StopKeyPhrasesDetectionJobWithContext(aws.Context, *comprehend.StopKeyPhrasesDetectionJobInput, ...request.Option) (*comprehend.StopKeyPhrasesDetectionJobOutput, error) StopKeyPhrasesDetectionJobRequest(*comprehend.StopKeyPhrasesDetectionJobInput) (*request.Request, *comprehend.StopKeyPhrasesDetectionJobOutput) diff --git a/service/elasticbeanstalk/api.go b/service/elasticbeanstalk/api.go index a35896b3ed8..8cc9d83ef12 100644 --- a/service/elasticbeanstalk/api.go +++ b/service/elasticbeanstalk/api.go @@ -7564,7 +7564,7 @@ type DescribeEnvironmentManagedActionHistoryInput struct { EnvironmentName *string `min:"4" type:"string"` // The maximum number of items to return for a single request. - MaxItems *int64 `type:"integer"` + MaxItems *int64 `min:"1" type:"integer"` // The pagination token returned by a previous request. 
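The new minimum on MaxItems is enforced by the MinValue check added to Validate just below, so an out-of-range value now fails before any network call. A minimal sketch (the environment name is a placeholder):

```go
package beanstalk

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elasticbeanstalk"
)

// maxItemsNowValidated demonstrates the tightened MaxItems constraint:
// a value of 0 fails client-side validation instead of reaching the API.
func maxItemsNowValidated() error {
	in := &elasticbeanstalk.DescribeEnvironmentManagedActionHistoryInput{
		EnvironmentName: aws.String("my-env"), // placeholder environment
		MaxItems:        aws.Int64(0),         // below the new minimum of 1
	}
	// Returns a request.ErrInvalidParams naming MaxItems, per the
	// MinValue check added just below.
	return in.Validate()
}
```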
NextToken *string `type:"string"` @@ -7586,6 +7586,9 @@ func (s *DescribeEnvironmentManagedActionHistoryInput) Validate() error { if s.EnvironmentName != nil && len(*s.EnvironmentName) < 4 { invalidParams.Add(request.NewErrParamMinLen("EnvironmentName", 4)) } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } if invalidParams.Len() > 0 { return invalidParams diff --git a/service/fsx/api.go b/service/fsx/api.go index d147973274f..1a4a41c401d 100644 --- a/service/fsx/api.go +++ b/service/fsx/api.go @@ -269,7 +269,7 @@ func (c *FSx) CreateBackupRequest(input *CreateBackupInput) (req *request.Reques // For more information about backing up Amazon FSx for Lustre file systems, // see Working with FSx for Lustre backups (https://docs.aws.amazon.com/fsx/latest/LustreGuide/using-backups-fsx.html). // -// For more information about backing up Amazon FSx for Lustre file systems, +// For more information about backing up Amazon FSx for Windows file systems, // see Working with FSx for Windows backups (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/using-backups.html). // // If a backup with the specified client request token exists, and the parameters @@ -400,9 +400,9 @@ func (c *FSx) CreateDataRepositoryTaskRequest(input *CreateDataRepositoryTaskInp // and symbolic links (symlinks) from your FSx file system to its linked data // repository. A CreateDataRepositoryTask operation will fail if a data repository // is not linked to the FSx file system. To learn more about data repository -// tasks, see Using Data Repository Tasks (https://docs.aws.amazon.com/fsx/latest/LustreGuide/data-repository-tasks.html). -// To learn more about linking a data repository to your file system, see Setting -// the Export Prefix (https://docs.aws.amazon.com/fsx/latest/LustreGuide/export-data-repository.html#export-prefix). +// tasks, see Data Repository Tasks (https://docs.aws.amazon.com/fsx/latest/LustreGuide/data-repository-tasks.html). +// To learn more about linking a data repository to your file system, see Linking +// your file system to an S3 bucket (https://docs.aws.amazon.com/fsx/latest/LustreGuide/create-fs-linked-data-repo.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2060,6 +2060,8 @@ func (c *FSx) UpdateFileSystemRequest(input *UpdateFileSystemInput) (req *reques // // * DailyAutomaticBackupStartTime // +// * StorageCapacity +// // * WeeklyMaintenanceStartTime // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2216,8 +2218,8 @@ func (s *ActiveDirectoryError) RequestID() string { return s.RespMetadata.RequestID } -// Describes a specific Amazon FSx Administrative Action for the current Windows -// file system. +// Describes a specific Amazon FSx administrative action for the current Windows +// or Lustre file system. type AdministrativeAction struct { _ struct{} `type:"structure"` @@ -2229,11 +2231,16 @@ type AdministrativeAction struct { // // * STORAGE_OPTIMIZATION - Once the FILE_SYSTEM_UPDATE task to increase // a file system's storage capacity completes successfully, a STORAGE_OPTIMIZATION - // task starts. Storage optimization is the process of migrating the file - // system data to the new, larger disks. You can track the storage migration - // progress using the ProgressPercent property. 
When STORAGE_OPTIMIZATION - // completes successfully, the parent FILE_SYSTEM_UPDATE action status changes - // to COMPLETED. For more information, see Managing Storage Capacity (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-storage-capacity.html). + // task starts. For Windows, storage optimization is the process of migrating + // the file system data to the new, larger disks. For Lustre, storage optimization + // consists of rebalancing the data across the existing and newly added file + // servers. You can track the storage optimization progress using the ProgressPercent + // property. When STORAGE_OPTIMIZATION completes successfully, the parent + // FILE_SYSTEM_UPDATE action status changes to COMPLETED. For more information, + // see Managing storage capacity (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-storage-capacity.html) + // in the Amazon FSx for Windows File Server User Guide and Managing storage + // and throughput capacity (https://docs.aws.amazon.com/fsx/latest/LustreGuide/managing-storage-capacity.html) + // in the Amazon FSx for Lustre User Guide. // // * FILE_SYSTEM_ALIAS_ASSOCIATION - A file system update to associate a // new DNS alias with the file system. For more information, see . @@ -2265,7 +2272,10 @@ type AdministrativeAction struct { // * UPDATED_OPTIMIZING - For a storage capacity increase update, Amazon // FSx has updated the file system with the new storage capacity, and is // now performing the storage optimization process. For more information, - // see Managing Storage Capacity (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-storage-capacity.html). + // see Managing storage capacity (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-storage-capacity.html) + // in the Amazon FSx for Windows File Server User Guide and Managing storage + // and throughput capacity (https://docs.aws.amazon.com/fsx/latest/LustreGuide/managing-storage-capacity.html) + // in the Amazon FSx for Lustre User Guide. Status *string `type:"string" enum:"Status"` // Describes the target value for the administration action, provided in the @@ -2523,7 +2533,11 @@ func (s *AssociateFileSystemAliasesOutput) SetAliases(v []*Alias) *AssociateFile return s } -// A backup of an Amazon FSx for file system. +// A backup of an Amazon FSx file system. For more information see: +// +// * Working with backups for Windows file systems (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/using-backups.html) +// +// * Working with backups for Lustre file systems (https://docs.aws.amazon.com/fsx/latest/LustreGuide/using-backups-fsx.html) type Backup struct { _ struct{} `type:"structure"` @@ -2558,12 +2572,15 @@ type Backup struct { // // * AVAILABLE - The backup is fully available. // - // * CREATING - FSx is creating the backup. + // * PENDING - For user-initiated backups on Lustre file systems only; Amazon + // FSx has not started creating the backup. + // + // * CREATING - Amazon FSx is creating the backup. // - // * TRANSFERRING - For Lustre file systems only; FSx is transferring the - // backup to S3. + // * TRANSFERRING - For user-initiated backups on Lustre file systems only; + // Amazon FSx is transferring the backup to S3. // - // * DELETED - The backup was deleted is no longer available. + // * DELETED - Amazon FSx deleted the backup and it is no longer available. // // * FAILED - Amazon FSx could not complete the backup. // @@ -5853,7 +5870,7 @@ type FileSystem struct { // The Amazon Resource Name (ARN) for the file system resource. 
ResourceARN *string `min:"8" type:"string"` - // The storage capacity of the file system in gigabytes (GB). + // The storage capacity of the file system in gibibytes (GiB). StorageCapacity *int64 `type:"integer"` // The storage type of the file system. Valid values are SSD and HDD. If set @@ -7612,14 +7629,33 @@ type UpdateFileSystemInput struct { // UpdateFileSystem operation. LustreConfiguration *UpdateFileSystemLustreConfiguration `type:"structure"` - // Use this parameter to increase the storage capacity of an Amazon FSx for - // Windows File Server file system. Specifies the storage capacity target value, - // GiB, for the file system you're updating. The storage capacity target value - // must be at least 10 percent (%) greater than the current storage capacity - // value. In order to increase storage capacity, the file system needs to have - // at least 16 MB/s of throughput capacity. You cannot make a storage capacity - // increase request if there is an existing storage capacity increase request - // in progress. For more information, see Managing Storage Capacity (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-storage-capacity.html). + // Use this parameter to increase the storage capacity of an Amazon FSx file + // system. Specifies the storage capacity target value, GiB, to increase the + // storage capacity for the file system that you're updating. You cannot make + // a storage capacity increase request if there is an existing storage capacity + // increase request in progress. + // + // For Windows file systems, the storage capacity target value must be at least + // 10 percent (%) greater than the current storage capacity value. In order + // to increase storage capacity, the file system must have at least 16 MB/s + // of throughput capacity. + // + // For Lustre file systems, the storage capacity target value can be the following: + // + // * For SCRATCH_2 and PERSISTENT_1 SSD deployment types, valid values are + // in multiples of 2400 GiB. The value must be greater than the current storage + // capacity. + // + // * For PERSISTENT HDD file systems, valid values are multiples of 6000 + // GiB for 12 MB/s/TiB file systems and multiples of 1800 GiB for 40 MB/s/TiB + // file systems. The values must be greater than the current storage capacity. + // + // * For SCRATCH_1 file systems, you cannot increase the storage capacity. + // + // For more information, see Managing storage capacity (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-storage-capacity.html) + // in the Amazon FSx for Windows File Server User Guide and Managing storage + // and throughput capacity (https://docs.aws.amazon.com/fsx/latest/LustreGuide/managing-storage-capacity.html) + // in the Amazon FSx for Lustre User Guide. StorageCapacity *int64 `type:"integer"` // The configuration updates for an Amazon FSx for Windows File Server file @@ -8138,11 +8174,16 @@ func ActiveDirectoryErrorType_Values() []string { // // * STORAGE_OPTIMIZATION - Once the FILE_SYSTEM_UPDATE task to increase // a file system's storage capacity completes successfully, a STORAGE_OPTIMIZATION -// task starts. Storage optimization is the process of migrating the file -// system data to the new, larger disks. You can track the storage migration -// progress using the ProgressPercent property. When STORAGE_OPTIMIZATION -// completes successfully, the parent FILE_SYSTEM_UPDATE action status changes -// to COMPLETED. 
For more information, see Managing Storage Capacity (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-storage-capacity.html). +// task starts. For Windows, storage optimization is the process of migrating +// the file system data to the new, larger disks. For Lustre, storage optimization +// consists of rebalancing the data across the existing and newly added file +// servers. You can track the storage optimization progress using the ProgressPercent +// property. When STORAGE_OPTIMIZATION completes successfully, the parent +// FILE_SYSTEM_UPDATE action status changes to COMPLETED. For more information, +// see Managing storage capacity (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-storage-capacity.html) +// in the Amazon FSx for Windows File Server User Guide and Managing storage +// and throughput capacity (https://docs.aws.amazon.com/fsx/latest/LustreGuide/managing-storage-capacity.html) +// in the Amazon FSx for Lustre User Guide. // // * FILE_SYSTEM_ALIAS_ASSOCIATION - A file system update to associate a // new DNS alias with the file system. For more information, see . @@ -8225,12 +8266,15 @@ func AutoImportPolicyType_Values() []string { // // * AVAILABLE - The backup is fully available. // -// * CREATING - FSx is creating the new user-intiated backup +// * PENDING - For user-initiated backups on Lustre file systems only; Amazon +// FSx has not started creating the backup. +// +// * CREATING - Amazon FSx is creating the new user-initiated backup // // * TRANSFERRING - For user-initiated backups on Lustre file systems only; -// FSx is backing up the file system. +// Amazon FSx is backing up the file system. // -// * DELETED - The backup was deleted is no longer available. +// * DELETED - Amazon FSx deleted the backup and it is no longer available. // // * FAILED - Amazon FSx could not complete the backup. const ( @@ -8248,6 +8292,9 @@ const ( // BackupLifecycleFailed is a BackupLifecycle enum value BackupLifecycleFailed = "FAILED" + + // BackupLifecyclePending is a BackupLifecycle enum value + BackupLifecyclePending = "PENDING" ) // BackupLifecycle_Values returns all elements of the BackupLifecycle enum @@ -8258,6 +8305,7 @@ func BackupLifecycle_Values() []string { BackupLifecycleTransferring, BackupLifecycleDeleted, BackupLifecycleFailed, + BackupLifecyclePending, } } diff --git a/service/gamelift/api.go b/service/gamelift/api.go index d756494a899..978b9bda1f4 100644 --- a/service/gamelift/api.go +++ b/service/gamelift/api.go @@ -83,9 +83,9 @@ func (c *GameLift) AcceptMatchRequest(input *AcceptMatchInput) (req *request.Req // // Learn more // -// Add FlexMatch to a Game Client (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-client.html) +// Add FlexMatch to a Game Client (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-client.html) // -// FlexMatch Events Reference (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-events.html) +// FlexMatch Events Reference (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-events.html) // // Related operations // @@ -1234,30 +1234,35 @@ func (c *GameLift) CreateMatchmakingConfigurationRequest(input *CreateMatchmakin // CreateMatchmakingConfiguration API operation for Amazon GameLift. // -// Defines a new matchmaking configuration for use with FlexMatch. A matchmaking -// configuration sets out guidelines for matching players and getting the matches -// into games.
You can set up multiple matchmaking configurations to handle
-// the scenarios needed for your game. Each matchmaking ticket (StartMatchmaking
-// or StartMatchBackfill) specifies a configuration for the match and provides
-// player attributes to support the configuration being used.
-//
-// To create a matchmaking configuration, at a minimum you must specify the
-// following: configuration name; a rule set that governs how to evaluate players
-// and find acceptable matches; a game session queue to use when placing a new
-// game session for the match; and the maximum time allowed for a matchmaking
-// attempt.
-//
-// To track the progress of matchmaking tickets, set up an Amazon Simple Notification
-// Service (SNS) to receive notifications, and provide the topic ARN in the
-// matchmaking configuration. An alternative method, continuously poling ticket
-// status with DescribeMatchmaking, should only be used for games in development
-// with low matchmaking usage.
+// Defines a new matchmaking configuration for use with FlexMatch. Whether you
+// are using FlexMatch with GameLift hosting or as a standalone matchmaking
+// service, the matchmaking configuration sets out rules for matching players
+// and forming teams. If you're also using GameLift hosting, it defines how
+// to start game sessions for each match. Your matchmaking system can use multiple
+// configurations to handle different game scenarios. All matchmaking requests
+// (StartMatchmaking or StartMatchBackfill) identify the matchmaking configuration
+// to use and provide player attributes consistent with that configuration.
+//
+// To create a matchmaking configuration, you must provide the following: configuration
+// name and FlexMatch mode (with or without GameLift hosting); a rule set that
+// specifies how to evaluate players and find acceptable matches; whether player
+// acceptance is required; and the maximum time allowed for a matchmaking attempt.
+// When using FlexMatch with GameLift hosting, you also need to identify the
+// game session queue to use when starting a game session for the match.
+//
+// In addition, you must set up an Amazon Simple Notification Service (SNS) topic
+// to receive matchmaking notifications, and provide the topic ARN in the matchmaking
+// configuration. An alternative method, continuously polling ticket status
+// with DescribeMatchmaking, is only suitable for games in development with
+// low matchmaking usage.
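
To make the new standalone mode concrete, here is a minimal, hypothetical sketch of creating a STANDALONE matchmaker with this SDK. The configuration name, rule set name, and SNS topic ARN are illustrative assumptions, and GameSessionQueueArns is omitted because standalone mode does not use a queue:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/gamelift"
)

func main() {
	svc := gamelift.New(session.Must(session.NewSession()))

	// Standalone mode: FlexMatch only forms the match, so no game session
	// queue ARNs are needed (GameSessionQueueArns is now optional).
	out, err := svc.CreateMatchmakingConfiguration(&gamelift.CreateMatchmakingConfigurationInput{
		Name:                  aws.String("standalone-matchmaker"), // assumed name
		RuleSetName:           aws.String("my-rule-set"),           // assumed rule set
		FlexMatchMode:         aws.String(gamelift.FlexMatchModeStandalone),
		AcceptanceRequired:    aws.Bool(false),
		RequestTimeoutSeconds: aws.Int64(120),
		// SNS topic that receives matchmaking event notifications (assumed ARN).
		NotificationTarget: aws.String("arn:aws:sns:us-west-2:111122223333:flexmatch-events"),
	})
	if err != nil {
		fmt.Println("CreateMatchmakingConfiguration failed:", err)
		return
	}
	fmt.Println("created configuration:", aws.StringValue(out.Configuration.Name))
}
```

With FlexMatchMode set to WITH_QUEUE instead, the same call would also need GameSessionQueueArns so that GameLift can start a game session for each match.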
// // Learn more // -// Design a FlexMatch Matchmaker (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-configuration.html) +// FlexMatch Developer Guide (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/gamelift-match.html) // -// Set Up FlexMatch Event Notification (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-notification.html) +// Design a FlexMatch Matchmaker (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-configuration.html) +// +// Set Up FlexMatch Event Notification (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-notification.html) // // Related operations // @@ -1391,11 +1396,11 @@ func (c *GameLift) CreateMatchmakingRuleSetRequest(input *CreateMatchmakingRuleS // // Learn more // -// * Build a Rule Set (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-rulesets.html) +// * Build a Rule Set (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-rulesets.html) // -// * Design a Matchmaker (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-configuration.html) +// * Design a Matchmaker (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-configuration.html) // -// * Matchmaking with FlexMatch (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-intro.html) +// * Matchmaking with FlexMatch (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-intro.html) // // Related operations // @@ -2900,7 +2905,7 @@ func (c *GameLift) DeleteMatchmakingRuleSetRequest(input *DeleteMatchmakingRuleS // // Learn more // -// * Build a Rule Set (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-rulesets.html) +// * Build a Rule Set (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-rulesets.html) // // Related operations // @@ -6021,9 +6026,9 @@ func (c *GameLift) DescribeMatchmakingRequest(input *DescribeMatchmakingInput) ( // // Learn more // -// Add FlexMatch to a Game Client (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-client.html) +// Add FlexMatch to a Game Client (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-client.html) // -// Set Up FlexMatch Event Notification (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-notification.html) +// Set Up FlexMatch Event Notification (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-notification.html) // // Related operations // @@ -6143,7 +6148,7 @@ func (c *GameLift) DescribeMatchmakingConfigurationsRequest(input *DescribeMatch // // Learn more // -// Setting Up FlexMatch Matchmakers (https://docs.aws.amazon.com/gamelift/latest/developerguide/matchmaker-build.html) +// Setting Up FlexMatch Matchmakers (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/matchmaker-build.html) // // Related operations // @@ -6315,7 +6320,7 @@ func (c *GameLift) DescribeMatchmakingRuleSetsRequest(input *DescribeMatchmaking // // Learn more // -// * Build a Rule Set (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-rulesets.html) +// * Build a Rule Set (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-rulesets.html) // // Related operations // @@ -9803,13 +9808,13 @@ func (c *GameLift) StartMatchBackfillRequest(input *StartMatchBackfillInput) (re // game session's connection information, and the GameSession object is updated // to include matchmaker data on the new players. 
For more detail on how match // backfill requests are processed, see How Amazon GameLift FlexMatch Works -// (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-match.html). +// (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/gamelift-match.html). // // Learn more // -// Backfill Existing Games with FlexMatch (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-backfill.html) +// Backfill Existing Games with FlexMatch (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-backfill.html) // -// How GameLift FlexMatch Works (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-match.html) +// How GameLift FlexMatch Works (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/gamelift-match.html) // // Related operations // @@ -9914,61 +9919,35 @@ func (c *GameLift) StartMatchmakingRequest(input *StartMatchmakingInput) (req *r // StartMatchmaking API operation for Amazon GameLift. // // Uses FlexMatch to create a game match for a group of players based on custom -// matchmaking rules, and starts a new game for the matched players. Each matchmaking -// request specifies the type of match to build (team configuration, rules for -// an acceptable match, etc.). The request also specifies the players to find -// a match for and where to host the new game session for optimal performance. -// A matchmaking request might start with a single player or a group of players -// who want to play together. FlexMatch finds additional players as needed to -// fill the match. Match type, rules, and the queue used to place a new game -// session are defined in a MatchmakingConfiguration. +// matchmaking rules. If you're also using GameLift hosting, a new game session +// is started for the matched players. Each matchmaking request identifies one +// or more players to find a match for, and specifies the type of match to build, +// including the team configuration and the rules for an acceptable match. When +// a matchmaking request identifies a group of players who want to play together, +// FlexMatch finds additional players to fill the match. Match type, rules, +// and other features are defined in a MatchmakingConfiguration. // // To start matchmaking, provide a unique ticket ID, specify a matchmaking configuration, -// and include the players to be matched. You must also include a set of player -// attributes relevant for the matchmaking configuration. If successful, a matchmaking -// ticket is returned with status set to QUEUED. -// -// Track the status of the ticket to respond as needed and acquire game session -// connection information for successfully completed matches. Ticket status -// updates are tracked using event notification through Amazon Simple Notification -// Service (SNS), which is defined in the matchmaking configuration. -// -// Processing a matchmaking request -- FlexMatch handles a matchmaking request -// as follows: -// -// Your client code submits a StartMatchmaking request for one or more players -// and tracks the status of the request ticket. -// -// FlexMatch uses this ticket and others in process to build an acceptable match. -// When a potential match is identified, all tickets in the proposed match are -// advanced to the next status. -// -// If the match requires player acceptance (set in the matchmaking configuration), -// the tickets move into status REQUIRES_ACCEPTANCE. 
This status triggers your -// client code to solicit acceptance from all players in every ticket involved -// in the match, and then call AcceptMatch for each player. If any player rejects -// or fails to accept the match before a specified timeout, the proposed match -// is dropped (see AcceptMatch for more details). -// -// Once a match is proposed and accepted, the matchmaking tickets move into -// status PLACING. FlexMatch locates resources for a new game session using -// the game session queue (set in the matchmaking configuration) and creates -// the game session based on the match data. -// -// When the match is successfully placed, the matchmaking tickets move into -// COMPLETED status. Connection information (including game session endpoint -// and player session) is added to the matchmaking tickets. Matched players -// can use the connection information to join the game. +// and include the players to be matched. For each player, you must also include +// the player attribute values that are required by the matchmaking configuration +// (in the rule set). If successful, a matchmaking ticket is returned with status +// set to QUEUED. +// +// Track the status of the ticket to respond as needed. If you're also using +// GameLift hosting, a successfully completed ticket contains game session connection +// information. Ticket status updates are tracked using event notification through +// Amazon Simple Notification Service (SNS), which is defined in the matchmaking +// configuration. // // Learn more // -// Add FlexMatch to a Game Client (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-client.html) +// Add FlexMatch to a Game Client (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-client.html) // -// Set Up FlexMatch Event Notification (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-notification.html) +// Set Up FlexMatch Event Notification (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-notification.html) // -// FlexMatch Integration Roadmap (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-tasks.html) +// FlexMatch Integration Roadmap (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-tasks.html) // -// How GameLift FlexMatch Works (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-match.html) +// How GameLift FlexMatch Works (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/gamelift-match.html) // // Related operations // @@ -10315,7 +10294,7 @@ func (c *GameLift) StopMatchmakingRequest(input *StopMatchmakingInput) (req *req // // Learn more // -// Add FlexMatch to a Game Client (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-client.html) +// Add FlexMatch to a Game Client (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-client.html) // // Related operations // @@ -11930,7 +11909,7 @@ func (c *GameLift) UpdateMatchmakingConfigurationRequest(input *UpdateMatchmakin // // Learn more // -// Design a FlexMatch Matchmaker (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-configuration.html) +// Design a FlexMatch Matchmaker (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-configuration.html) // // Related operations // @@ -12297,7 +12276,7 @@ func (c *GameLift) ValidateMatchmakingRuleSetRequest(input *ValidateMatchmakingR // // Learn more // -// * Build a Rule Set (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-rulesets.html) +// * Build a Rule Set 
(https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-rulesets.html)
//
// Related operations
//
@@ -13106,12 +13085,14 @@ type CreateBuildInput struct {
	// cannot be changed later.
	OperatingSystem *string `type:"string" enum:"OperatingSystem"`

-	// Information indicating where your game build files are stored. Use this parameter
-	// only when creating a build with files stored in an S3 bucket that you own.
-	// The storage location must specify an S3 bucket name and key. The location
-	// must also specify a role ARN that you set up to allow Amazon GameLift to
-	// access your S3 bucket. The S3 bucket and your new build must be in the same
-	// Region.
+	// The location where your game build files are stored. Use this parameter only
+	// when creating a build using files that are stored in an S3 bucket that you
+	// own. Identify an S3 bucket name and key, which must be in the same Region
+	// where you're creating a build. This parameter must also specify the ARN for
+	// an IAM role that you've set up to give Amazon GameLift access to your S3 bucket.
+	// To call this operation with a storage location, you must have IAM PassRole
+	// permission. For more details on IAM roles and PassRole permissions, see Set
+	// up a role for GameLift access (https://docs.aws.amazon.com/gamelift/latest/developerguide/setting-up-role.html).
	StorageLocation *S3Location `type:"structure"`

	// A list of labels to assign to the new build resource. Tags are developer-defined
@@ -13302,12 +13283,11 @@ type CreateFleetInput struct {
	FleetType *string `type:"string" enum:"FleetType"`

	// A unique identifier for an AWS IAM role that manages access to your AWS services.
-	// With an instance role ARN set, any application that runs on an instance in
-	// this fleet can assume the role, including install scripts, server processes,
-	// and daemons (background processes). Create a role or look up a role's ARN
-	// from the IAM dashboard (https://console.aws.amazon.com/iam/) in the AWS Management
-	// Console. Learn more about using on-box credentials for your game servers
-	// at Access external resources from a game server (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html).
+	// Fleets with an instance role ARN allow applications that are running on the
+	// fleet's instances to assume the role. Learn more about using on-box credentials
+	// for your game servers at Access external resources from a game server (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html).
+	// To call this operation with an instance role ARN, you must have IAM PassRole
+	// permissions. See IAM policy examples for GameLift (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-iam-policy-examples.html).
	InstanceRoleArn *string `min:"1" type:"string"`

	// This parameter is no longer used. Instead, to specify where Amazon GameLift
@@ -13728,7 +13708,7 @@ type CreateGameServerGroupInput struct {
	// up. This property cannot be updated after the game server group is created,
	// and the corresponding Auto Scaling group will always use the property value
	// that is set with this request, even if the Auto Scaling group is updated
-	// directly
+	// directly.
	VpcSubnets []*string `min:"1" type:"list"`
}
@@ -14239,19 +14219,23 @@ type CreateMatchmakingConfigurationInput struct {
	// A flag that determines whether a match that was created with this configuration
	// must be accepted by the matched players. To require acceptance, set to TRUE.
+ // With this option enabled, matchmaking tickets use the status REQUIRES_ACCEPTANCE + // to indicate when a completed potential match is waiting for player acceptance. // // AcceptanceRequired is a required field AcceptanceRequired *bool `type:"boolean" required:"true"` // The length of time (in seconds) to wait for players to accept a proposed - // match. If any player rejects the match or fails to accept before the timeout, - // the ticket continues to look for an acceptable match. + // match, if acceptance is required. If any player rejects the match or fails + // to accept before the timeout, the tickets are returned to the ticket pool + // and continue to be evaluated for an acceptable match. AcceptanceTimeoutSeconds *int64 `min:"1" type:"integer"` // The number of player slots in a match to keep open for future players. For // example, assume that the configuration's rule set specifies a match for a // single 12-person team. If the additional player count is set to 2, only 10 - // players are initially selected for the match. + // players are initially selected for the match. This parameter is not used + // if FlexMatchMode is set to STANDALONE. AdditionalPlayerCount *int64 `type:"integer"` // The method used to backfill game sessions that are created with this matchmaking @@ -14259,7 +14243,8 @@ type CreateMatchmakingConfigurationInput struct { // or does not use the match backfill feature. Specify AUTOMATIC to have GameLift // create a StartMatchBackfill request whenever a game session has one or more // open slots. Learn more about manual and automatic backfill in Backfill Existing - // Games with FlexMatch (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-backfill.html). + // Games with FlexMatch (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-backfill.html). + // Automatic backfill is not available when FlexMatchMode is set to STANDALONE. BackfillMode *string `type:"string" enum:"BackfillMode"` // Information to be added to all events related to this matchmaking configuration. @@ -14268,28 +14253,40 @@ type CreateMatchmakingConfigurationInput struct { // A human-readable description of the matchmaking configuration. Description *string `min:"1" type:"string"` + // Indicates whether this matchmaking configuration is being used with GameLift + // hosting or as a standalone matchmaking solution. + // + // * STANDALONE - FlexMatch forms matches and returns match information, + // including players and team assignments, in a MatchmakingSucceeded (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-events.html#match-events-matchmakingsucceeded) + // event. + // + // * WITH_QUEUE - FlexMatch forms matches and uses the specified GameLift + // queue to start a game session for the match. + FlexMatchMode *string `type:"string" enum:"FlexMatchMode"` + // A set of custom properties for a game session, formatted as key-value pairs. // These properties are passed to a game server process in the GameSession object // with a request to start a new game session (see Start a Game Session (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)). // This information is added to the new GameSession object that is created for - // a successful match. + // a successful match. This parameter is not used if FlexMatchMode is set to + // STANDALONE. GameProperties []*GameProperty `type:"list"` // A set of custom game session properties, formatted as a single string value. 
// This data is passed to a game server process in the GameSession object with // a request to start a new game session (see Start a Game Session (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)). // This information is added to the new GameSession object that is created for - // a successful match. + // a successful match. This parameter is not used if FlexMatchMode is set to + // STANDALONE. GameSessionData *string `min:"1" type:"string"` // Amazon Resource Name (ARN (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)) // that is assigned to a GameLift game session queue resource and uniquely identifies - // it. ARNs are unique across all Regions. These queues are used when placing - // game sessions for matches that are created with this matchmaking configuration. - // Queues can be located in any Region. - // - // GameSessionQueueArns is a required field - GameSessionQueueArns []*string `type:"list" required:"true"` + // it. ARNs are unique across all Regions. Queues can be located in any Region. + // Queues are used to start new GameLift-hosted game sessions for matches that + // are created with this matchmaking configuration. If FlexMatchMode is set + // to STANDALONE, do not set this parameter. + GameSessionQueueArns []*string `type:"list"` // A unique identifier for a matchmaking configuration. This name is used to // identify the configuration associated with a matchmaking request or ticket. @@ -14350,9 +14347,6 @@ func (s *CreateMatchmakingConfigurationInput) Validate() error { if s.GameSessionData != nil && len(*s.GameSessionData) < 1 { invalidParams.Add(request.NewErrParamMinLen("GameSessionData", 1)) } - if s.GameSessionQueueArns == nil { - invalidParams.Add(request.NewErrParamRequired("GameSessionQueueArns")) - } if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } @@ -14431,6 +14425,12 @@ func (s *CreateMatchmakingConfigurationInput) SetDescription(v string) *CreateMa return s } +// SetFlexMatchMode sets the FlexMatchMode field's value. +func (s *CreateMatchmakingConfigurationInput) SetFlexMatchMode(v string) *CreateMatchmakingConfigurationInput { + s.FlexMatchMode = &v + return s +} + // SetGameProperties sets the GameProperties field's value. func (s *CreateMatchmakingConfigurationInput) SetGameProperties(v []*GameProperty) *CreateMatchmakingConfigurationInput { s.GameProperties = v @@ -14812,14 +14812,15 @@ type CreateScriptInput struct { // need to be unique. You can use UpdateScript to change this value later. Name *string `min:"1" type:"string"` - // The location of the Amazon S3 bucket where a zipped file containing your - // Realtime scripts is stored. The storage location must specify the Amazon - // S3 bucket name, the zip file name (the "key"), and a role ARN that allows - // Amazon GameLift to access the Amazon S3 storage location. The S3 bucket must - // be in the same Region where you want to create a new script. By default, - // Amazon GameLift uploads the latest version of the zip file; if you have S3 - // object versioning turned on, you can use the ObjectVersion parameter to specify - // an earlier version. + // The Amazon S3 location of your Realtime scripts. The storage location must + // specify the S3 bucket name, the zip file name (the "key"), and an IAM role + // ARN that allows Amazon GameLift to access the S3 storage location. The S3 + // bucket must be in the same Region where you are creating a new script. 
By
+	// default, Amazon GameLift uploads the latest version of the zip file; if you
+	// have S3 object versioning turned on, you can use the ObjectVersion parameter
+	// to specify an earlier version. To call this operation with a storage location,
+	// you must have IAM PassRole permission. For more details on IAM roles and
+	// PassRole permissions, see Set up a role for GameLift access (https://docs.aws.amazon.com/gamelift/latest/developerguide/setting-up-role.html).
	StorageLocation *S3Location `type:"structure"`

	// A list of labels to assign to the new script resource. Tags are developer-defined
@@ -15297,8 +15298,8 @@ type DeleteGameServerGroupInput struct {
	// The type of delete to perform. Options include the following:
	//
-	// * SAFE_DELETE – Terminates the game server group and EC2 Auto Scaling
-	// group only when it has no game servers that are in UTILIZED status.
+	// * SAFE_DELETE – (default) Terminates the game server group and EC2 Auto
+	// Scaling group only when it has no game servers that are in UTILIZED status.
	//
	// * FORCE_DELETE – Terminates the game server group, including all active
	// game servers regardless of their utilization status, and the EC2 Auto
@@ -18576,12 +18577,6 @@ type FleetAttributes struct {
	FleetType *string `type:"string" enum:"FleetType"`

	// A unique identifier for an AWS IAM role that manages access to your AWS services.
-	// With an instance role ARN set, any application that runs on an instance in
-	// this fleet can assume the role, including install scripts, server processes,
-	// and daemons (background processes). Create a role or look up a role's ARN
-	// from the IAM dashboard (https://console.aws.amazon.com/iam/) in the AWS Management
-	// Console. Learn more about using on-box credentials for your game servers
-	// at Access external resources from a game server (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html).
	InstanceRoleArn *string `min:"1" type:"string"`

	// EC2 instance type indicating the computing resources of each instance in
@@ -19683,7 +19678,7 @@ type GameSession struct {
	// session. It is in JSON syntax, formatted as a string. In addition to the matchmaking
	// configuration used, it contains data on all players assigned to the match,
	// including player attributes and team assignments. For more details on matchmaker
-	// data, see Match Data (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-server.html#match-server-data).
+	// data, see Match Data (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-server.html#match-server-data).
	// Matchmaker data is useful when requesting match backfills, and is updated
	// whenever new players are added during a successful backfill (see StartMatchBackfill).
	MatchmakerData *string `min:"1" type:"string"`
@@ -19835,12 +19830,12 @@ func (s *GameSession) SetTerminationTime(v time.Time) *GameSession {
	return s
}

-// Connection information for the new game session that is created with matchmaking.
-// (with StartMatchmaking). Once a match is set, the FlexMatch engine places
-// the match and creates a new game session for it. This information, including
-// the game session endpoint and player sessions for each player in the original
-// matchmaking request, is added to the MatchmakingTicket, which can be retrieved
-// by calling DescribeMatchmaking.
+// Connection information for a new game session that is created in response
+// to a StartMatchmaking request.
Once a match is made, the FlexMatch engine +// creates a new game session for it. This information, including the game session +// endpoint and player sessions for each player in the original matchmaking +// request, is added to the MatchmakingTicket, which can be retrieved by calling +// DescribeMatchmaking. type GameSessionConnectionInfo struct { _ struct{} `type:"structure"` @@ -20083,7 +20078,7 @@ type GameSessionPlacement struct { // formatted as a string. It identifies the matchmaking configuration used to // create the match, and contains data on all players assigned to the match, // including player attributes and team assignments. For more details on matchmaker - // data, see Match Data (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-server.html#match-server-data). + // data, see Match Data (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-server.html#match-server-data). MatchmakerData *string `min:"1" type:"string"` // The maximum number of players that can be connected simultaneously to the @@ -22068,24 +22063,29 @@ type MatchmakingConfiguration struct { // A flag that indicates whether a match that was created with this configuration // must be accepted by the matched players. To require acceptance, set to TRUE. + // When this option is enabled, matchmaking tickets use the status REQUIRES_ACCEPTANCE + // to indicate when a completed potential match is waiting for player acceptance. AcceptanceRequired *bool `type:"boolean"` // The length of time (in seconds) to wait for players to accept a proposed - // match. If any player rejects the match or fails to accept before the timeout, - // the ticket continues to look for an acceptable match. + // match, if acceptance is required. If any player rejects the match or fails + // to accept before the timeout, the tickets are returned to the ticket pool + // and continue to be evaluated for an acceptable match. AcceptanceTimeoutSeconds *int64 `min:"1" type:"integer"` // The number of player slots in a match to keep open for future players. For // example, assume that the configuration's rule set specifies a match for a // single 12-person team. If the additional player count is set to 2, only 10 - // players are initially selected for the match. + // players are initially selected for the match. This parameter is not used + // when FlexMatchMode is set to STANDALONE. AdditionalPlayerCount *int64 `type:"integer"` // The method used to backfill game sessions created with this matchmaking configuration. // MANUAL indicates that the game makes backfill requests or does not use the // match backfill feature. AUTOMATIC indicates that GameLift creates StartMatchBackfill // requests whenever a game session has one or more open slots. Learn more about - // manual and automatic backfill in Backfill Existing Games with FlexMatch (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-backfill.html). + // manual and automatic backfill in Backfill Existing Games with FlexMatch (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-backfill.html). + // Automatic backfill is not available when FlexMatchMode is set to STANDALONE. BackfillMode *string `type:"string" enum:"BackfillMode"` // Amazon Resource Name (ARN (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)) @@ -22104,25 +22104,39 @@ type MatchmakingConfiguration struct { // A descriptive label that is associated with matchmaking configuration. 
Description *string `min:"1" type:"string"`

+	// Indicates whether this matchmaking configuration is being used with GameLift
+	// hosting or as a standalone matchmaking solution.
+	//
+	// * STANDALONE - FlexMatch forms matches and returns match information,
+	// including players and team assignments, in a MatchmakingSucceeded (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-events.html#match-events-matchmakingsucceeded)
+	// event.
+	//
+	// * WITH_QUEUE - FlexMatch forms matches and uses the specified GameLift
+	// queue to start a game session for the match.
+	FlexMatchMode *string `type:"string" enum:"FlexMatchMode"`
+
	// A set of custom properties for a game session, formatted as key-value pairs.
	// These properties are passed to a game server process in the GameSession object
	// with a request to start a new game session (see Start a Game Session (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)).
	// This information is added to the new GameSession object that is created for
-	// a successful match.
+	// a successful match. This parameter is not used when FlexMatchMode is set
+	// to STANDALONE.
	GameProperties []*GameProperty `type:"list"`

	// A set of custom game session properties, formatted as a single string value.
	// This data is passed to a game server process in the GameSession object with
	// a request to start a new game session (see Start a Game Session (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)).
	// This information is added to the new GameSession object that is created for
-	// a successful match.
+	// a successful match. This parameter is not used when FlexMatchMode is set
+	// to STANDALONE.
	GameSessionData *string `min:"1" type:"string"`

	// Amazon Resource Name (ARN (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html))
	// that is assigned to a GameLift game session queue resource and uniquely identifies
-	// it. ARNs are unique across all Regions. GameLift uses the listed queues when
-	// placing game sessions for matches that are created with this matchmaking
-	// configuration. Queues can be located in any Region.
+	// it. ARNs are unique across all Regions. Queues can be located in any Region.
+	// Queues are used to start new GameLift-hosted game sessions for matches that
+	// are created with this matchmaking configuration. This property is not set
+	// when FlexMatchMode is set to STANDALONE.
	GameSessionQueueArns []*string `type:"list"`

	// A unique identifier for a matchmaking configuration. This name is used to
@@ -22206,6 +22220,12 @@ func (s *MatchmakingConfiguration) SetDescription(v string) *MatchmakingConfigur
	return s
}

+// SetFlexMatchMode sets the FlexMatchMode field's value.
+func (s *MatchmakingConfiguration) SetFlexMatchMode(v string) *MatchmakingConfiguration {
+	s.FlexMatchMode = &v
+	return s
+}
+
// SetGameProperties sets the GameProperties field's value.
func (s *MatchmakingConfiguration) SetGameProperties(v []*GameProperty) *MatchmakingConfiguration {
	s.GameProperties = v
@@ -22261,7 +22281,7 @@ func (s *MatchmakingConfiguration) SetRuleSetName(v string) *MatchmakingConfigur
//
// A rule set may define the following elements for a match. For detailed information
// and examples showing how to construct a rule set, see Build a FlexMatch Rule
-// Set (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-rulesets.html).
+// Set (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-rulesets.html). // // * Teams -- Required. A rule set must define one or multiple teams for // the match and set minimum and maximum team sizes. For example, a rule @@ -22372,7 +22392,8 @@ type MatchmakingTicket struct { // Identifier and connection information of the game session created for the // match. This information is added to the ticket only after the matchmaking - // request has been successfully completed. + // request has been successfully completed. This parameter is not set when FlexMatch + // is being used without GameLift hosting. GameSessionConnectionInfo *GameSessionConnectionInfo `type:"structure"` // A set of Player objects, each representing a player to find matches for. @@ -24758,9 +24779,7 @@ type StartMatchBackfillInput struct { // Amazon Resource Name (ARN (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)) // that is assigned to a game session and uniquely identifies it. This is the // same as the game session ID. - // - // GameSessionArn is a required field - GameSessionArn *string `min:"1" type:"string" required:"true"` + GameSessionArn *string `min:"1" type:"string"` // Match information on all players that are currently assigned to the game // session. This information is used by the matchmaker to find new players and @@ -24769,7 +24788,7 @@ type StartMatchBackfillInput struct { // * PlayerID, PlayerAttributes, Team -\\- This information is maintained // in the GameSession object, MatchmakerData property, for all players who // are currently assigned to the game session. The matchmaker data is in - // JSON syntax, formatted as a string. For more details, see Match Data (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-server.html#match-server-data). + // JSON syntax, formatted as a string. For more details, see Match Data (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-server.html#match-server-data). // // * LatencyInMs -\\- If the matchmaker uses player latency, include a latency // value, in milliseconds, for the Region that the game session is currently @@ -24803,9 +24822,6 @@ func (s *StartMatchBackfillInput) Validate() error { if s.ConfigurationName != nil && len(*s.ConfigurationName) < 1 { invalidParams.Add(request.NewErrParamMinLen("ConfigurationName", 1)) } - if s.GameSessionArn == nil { - invalidParams.Add(request.NewErrParamRequired("GameSessionArn")) - } if s.GameSessionArn != nil && len(*s.GameSessionArn) < 1 { invalidParams.Add(request.NewErrParamMinLen("GameSessionArn", 1)) } @@ -26845,17 +26861,21 @@ type UpdateMatchmakingConfigurationInput struct { // A flag that indicates whether a match that was created with this configuration // must be accepted by the matched players. To require acceptance, set to TRUE. + // With this option enabled, matchmaking tickets use the status REQUIRES_ACCEPTANCE + // to indicate when a completed potential match is waiting for player acceptance. AcceptanceRequired *bool `type:"boolean"` // The length of time (in seconds) to wait for players to accept a proposed - // match. If any player rejects the match or fails to accept before the timeout, - // the ticket continues to look for an acceptable match. + // match, if acceptance is required. If any player rejects the match or fails + // to accept before the timeout, the tickets are returned to the ticket pool + // and continue to be evaluated for an acceptable match. 
AcceptanceTimeoutSeconds *int64 `min:"1" type:"integer"` // The number of player slots in a match to keep open for future players. For // example, assume that the configuration's rule set specifies a match for a // single 12-person team. If the additional player count is set to 2, only 10 - // players are initially selected for the match. + // players are initially selected for the match. This parameter is not used + // if FlexMatchMode is set to STANDALONE. AdditionalPlayerCount *int64 `type:"integer"` // The method that is used to backfill game sessions created with this matchmaking @@ -26863,7 +26883,8 @@ type UpdateMatchmakingConfigurationInput struct { // or does not use the match backfill feature. Specify AUTOMATIC to have GameLift // create a StartMatchBackfill request whenever a game session has one or more // open slots. Learn more about manual and automatic backfill in Backfill Existing - // Games with FlexMatch (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-backfill.html). + // Games with FlexMatch (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-backfill.html). + // Automatic backfill is not available when FlexMatchMode is set to STANDALONE. BackfillMode *string `type:"string" enum:"BackfillMode"` // Information to add to all events related to the matchmaking configuration. @@ -26872,25 +26893,39 @@ type UpdateMatchmakingConfigurationInput struct { // A descriptive label that is associated with matchmaking configuration. Description *string `min:"1" type:"string"` + // Indicates whether this matchmaking configuration is being used with GameLift + // hosting or as a standalone matchmaking solution. + // + // * STANDALONE - FlexMatch forms matches and returns match information, + // including players and team assignments, in a MatchmakingSucceeded (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-events.html#match-events-matchmakingsucceeded) + // event. + // + // * WITH_QUEUE - FlexMatch forms matches and uses the specified GameLift + // queue to start a game session for the match. + FlexMatchMode *string `type:"string" enum:"FlexMatchMode"` + // A set of custom properties for a game session, formatted as key-value pairs. // These properties are passed to a game server process in the GameSession object // with a request to start a new game session (see Start a Game Session (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)). // This information is added to the new GameSession object that is created for - // a successful match. + // a successful match. This parameter is not used if FlexMatchMode is set to + // STANDALONE. GameProperties []*GameProperty `type:"list"` // A set of custom game session properties, formatted as a single string value. // This data is passed to a game server process in the GameSession object with // a request to start a new game session (see Start a Game Session (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)). // This information is added to the new GameSession object that is created for - // a successful match. + // a successful match. This parameter is not used if FlexMatchMode is set to + // STANDALONE. GameSessionData *string `min:"1" type:"string"` // Amazon Resource Name (ARN (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)) // that is assigned to a GameLift game session queue resource and uniquely identifies - // it. 
ARNs are unique across all Regions. These queues are used when placing - // game sessions for matches that are created with this matchmaking configuration. - // Queues can be located in any Region. + // it. ARNs are unique across all Regions. Queues can be located in any Region. + // Queues are used to start new GameLift-hosted game sessions for matches that + // are created with this matchmaking configuration. If FlexMatchMode is set + // to STANDALONE, do not set this parameter. GameSessionQueueArns []*string `type:"list"` // A unique identifier for a matchmaking configuration to update. You can use @@ -26900,7 +26935,7 @@ type UpdateMatchmakingConfigurationInput struct { Name *string `min:"1" type:"string" required:"true"` // An SNS topic ARN that is set up to receive matchmaking notifications. See - // Setting up Notifications for Matchmaking (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-notification.html) + // Setting up Notifications for Matchmaking (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-notification.html) // for more information. NotificationTarget *string `type:"string"` @@ -27002,6 +27037,12 @@ func (s *UpdateMatchmakingConfigurationInput) SetDescription(v string) *UpdateMa return s } +// SetFlexMatchMode sets the FlexMatchMode field's value. +func (s *UpdateMatchmakingConfigurationInput) SetFlexMatchMode(v string) *UpdateMatchmakingConfigurationInput { + s.FlexMatchMode = &v + return s +} + // SetGameProperties sets the GameProperties field's value. func (s *UpdateMatchmakingConfigurationInput) SetGameProperties(v []*GameProperty) *UpdateMatchmakingConfigurationInput { s.GameProperties = v @@ -27171,14 +27212,15 @@ type UpdateScriptInput struct { // ScriptId is a required field ScriptId *string `type:"string" required:"true"` - // The location of the Amazon S3 bucket where a zipped file containing your - // Realtime scripts is stored. The storage location must specify the Amazon - // S3 bucket name, the zip file name (the "key"), and a role ARN that allows - // Amazon GameLift to access the Amazon S3 storage location. The S3 bucket must - // be in the same Region where you want to create a new script. By default, + // The Amazon S3 location of your Realtime scripts. The storage location must + // specify the S3 bucket name, the zip file name (the "key"), and an IAM role + // ARN that allows Amazon GameLift to access the S3 storage location. The S3 + // bucket must be in the same Region as the script you're updating. By default, // Amazon GameLift uploads the latest version of the zip file; if you have S3 // object versioning turned on, you can use the ObjectVersion parameter to specify - // an earlier version. + // an earlier version. To call this operation with a storage location, you must + // have IAM PassRole permission. For more details on IAM roles and PassRole + // permissions, see Set up a role for GameLift access (https://docs.aws.amazon.com/gamelift/latest/developerguide/setting-up-role.html). StorageLocation *S3Location `type:"structure"` // The version that is associated with a build or script. 
Version strings do @@ -27757,6 +27799,30 @@ const ( // EC2InstanceTypeC524xlarge is a EC2InstanceType enum value EC2InstanceTypeC524xlarge = "c5.24xlarge" + // EC2InstanceTypeC5aLarge is a EC2InstanceType enum value + EC2InstanceTypeC5aLarge = "c5a.large" + + // EC2InstanceTypeC5aXlarge is a EC2InstanceType enum value + EC2InstanceTypeC5aXlarge = "c5a.xlarge" + + // EC2InstanceTypeC5a2xlarge is a EC2InstanceType enum value + EC2InstanceTypeC5a2xlarge = "c5a.2xlarge" + + // EC2InstanceTypeC5a4xlarge is a EC2InstanceType enum value + EC2InstanceTypeC5a4xlarge = "c5a.4xlarge" + + // EC2InstanceTypeC5a8xlarge is a EC2InstanceType enum value + EC2InstanceTypeC5a8xlarge = "c5a.8xlarge" + + // EC2InstanceTypeC5a12xlarge is a EC2InstanceType enum value + EC2InstanceTypeC5a12xlarge = "c5a.12xlarge" + + // EC2InstanceTypeC5a16xlarge is a EC2InstanceType enum value + EC2InstanceTypeC5a16xlarge = "c5a.16xlarge" + + // EC2InstanceTypeC5a24xlarge is a EC2InstanceType enum value + EC2InstanceTypeC5a24xlarge = "c5a.24xlarge" + // EC2InstanceTypeR3Large is a EC2InstanceType enum value EC2InstanceTypeR3Large = "r3.large" @@ -27814,6 +27880,30 @@ const ( // EC2InstanceTypeR524xlarge is a EC2InstanceType enum value EC2InstanceTypeR524xlarge = "r5.24xlarge" + // EC2InstanceTypeR5aLarge is a EC2InstanceType enum value + EC2InstanceTypeR5aLarge = "r5a.large" + + // EC2InstanceTypeR5aXlarge is a EC2InstanceType enum value + EC2InstanceTypeR5aXlarge = "r5a.xlarge" + + // EC2InstanceTypeR5a2xlarge is a EC2InstanceType enum value + EC2InstanceTypeR5a2xlarge = "r5a.2xlarge" + + // EC2InstanceTypeR5a4xlarge is a EC2InstanceType enum value + EC2InstanceTypeR5a4xlarge = "r5a.4xlarge" + + // EC2InstanceTypeR5a8xlarge is a EC2InstanceType enum value + EC2InstanceTypeR5a8xlarge = "r5a.8xlarge" + + // EC2InstanceTypeR5a12xlarge is a EC2InstanceType enum value + EC2InstanceTypeR5a12xlarge = "r5a.12xlarge" + + // EC2InstanceTypeR5a16xlarge is a EC2InstanceType enum value + EC2InstanceTypeR5a16xlarge = "r5a.16xlarge" + + // EC2InstanceTypeR5a24xlarge is a EC2InstanceType enum value + EC2InstanceTypeR5a24xlarge = "r5a.24xlarge" + // EC2InstanceTypeM3Medium is a EC2InstanceType enum value EC2InstanceTypeM3Medium = "m3.medium" @@ -27864,6 +27954,30 @@ const ( // EC2InstanceTypeM524xlarge is a EC2InstanceType enum value EC2InstanceTypeM524xlarge = "m5.24xlarge" + + // EC2InstanceTypeM5aLarge is a EC2InstanceType enum value + EC2InstanceTypeM5aLarge = "m5a.large" + + // EC2InstanceTypeM5aXlarge is a EC2InstanceType enum value + EC2InstanceTypeM5aXlarge = "m5a.xlarge" + + // EC2InstanceTypeM5a2xlarge is a EC2InstanceType enum value + EC2InstanceTypeM5a2xlarge = "m5a.2xlarge" + + // EC2InstanceTypeM5a4xlarge is a EC2InstanceType enum value + EC2InstanceTypeM5a4xlarge = "m5a.4xlarge" + + // EC2InstanceTypeM5a8xlarge is a EC2InstanceType enum value + EC2InstanceTypeM5a8xlarge = "m5a.8xlarge" + + // EC2InstanceTypeM5a12xlarge is a EC2InstanceType enum value + EC2InstanceTypeM5a12xlarge = "m5a.12xlarge" + + // EC2InstanceTypeM5a16xlarge is a EC2InstanceType enum value + EC2InstanceTypeM5a16xlarge = "m5a.16xlarge" + + // EC2InstanceTypeM5a24xlarge is a EC2InstanceType enum value + EC2InstanceTypeM5a24xlarge = "m5a.24xlarge" ) // EC2InstanceType_Values returns all elements of the EC2InstanceType enum @@ -27891,6 +28005,14 @@ func EC2InstanceType_Values() []string { EC2InstanceTypeC512xlarge, EC2InstanceTypeC518xlarge, EC2InstanceTypeC524xlarge, + EC2InstanceTypeC5aLarge, + EC2InstanceTypeC5aXlarge, + EC2InstanceTypeC5a2xlarge, + 
EC2InstanceTypeC5a4xlarge, + EC2InstanceTypeC5a8xlarge, + EC2InstanceTypeC5a12xlarge, + EC2InstanceTypeC5a16xlarge, + EC2InstanceTypeC5a24xlarge, EC2InstanceTypeR3Large, EC2InstanceTypeR3Xlarge, EC2InstanceTypeR32xlarge, @@ -27910,6 +28032,14 @@ func EC2InstanceType_Values() []string { EC2InstanceTypeR512xlarge, EC2InstanceTypeR516xlarge, EC2InstanceTypeR524xlarge, + EC2InstanceTypeR5aLarge, + EC2InstanceTypeR5aXlarge, + EC2InstanceTypeR5a2xlarge, + EC2InstanceTypeR5a4xlarge, + EC2InstanceTypeR5a8xlarge, + EC2InstanceTypeR5a12xlarge, + EC2InstanceTypeR5a16xlarge, + EC2InstanceTypeR5a24xlarge, EC2InstanceTypeM3Medium, EC2InstanceTypeM3Large, EC2InstanceTypeM3Xlarge, @@ -27927,6 +28057,14 @@ func EC2InstanceType_Values() []string { EC2InstanceTypeM512xlarge, EC2InstanceTypeM516xlarge, EC2InstanceTypeM524xlarge, + EC2InstanceTypeM5aLarge, + EC2InstanceTypeM5aXlarge, + EC2InstanceTypeM5a2xlarge, + EC2InstanceTypeM5a4xlarge, + EC2InstanceTypeM5a8xlarge, + EC2InstanceTypeM5a12xlarge, + EC2InstanceTypeM5a16xlarge, + EC2InstanceTypeM5a24xlarge, } } @@ -28142,6 +28280,22 @@ func FleetType_Values() []string { } } +const ( + // FlexMatchModeStandalone is a FlexMatchMode enum value + FlexMatchModeStandalone = "STANDALONE" + + // FlexMatchModeWithQueue is a FlexMatchMode enum value + FlexMatchModeWithQueue = "WITH_QUEUE" +) + +// FlexMatchMode_Values returns all elements of the FlexMatchMode enum +func FlexMatchMode_Values() []string { + return []string{ + FlexMatchModeStandalone, + FlexMatchModeWithQueue, + } +} + const ( // GameServerClaimStatusClaimed is a GameServerClaimStatus enum value GameServerClaimStatusClaimed = "CLAIMED" diff --git a/service/iotsitewise/api.go b/service/iotsitewise/api.go index a6500d4c8cb..5276377b726 100644 --- a/service/iotsitewise/api.go +++ b/service/iotsitewise/api.go @@ -1169,8 +1169,8 @@ func (c *IoTSiteWise) CreatePresignedPortalUrlRequest(input *CreatePresignedPort // Creates a pre-signed URL to a portal. Use this operation to create URLs to // portals that use AWS Identity and Access Management (IAM) to authenticate // users. An IAM user with access to a portal can call this API to get a URL -// to that portal. The URL contains a session token that lets the IAM user access -// the portal. +// to that portal. The URL contains an authentication token that lets the IAM +// user access the portal. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2511,6 +2511,100 @@ func (c *IoTSiteWise) DescribeDashboardWithContext(ctx aws.Context, input *Descr return out, req.Send() } +const opDescribeDefaultEncryptionConfiguration = "DescribeDefaultEncryptionConfiguration" + +// DescribeDefaultEncryptionConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDefaultEncryptionConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeDefaultEncryptionConfiguration for more information on using the DescribeDefaultEncryptionConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the DescribeDefaultEncryptionConfigurationRequest method. +// req, resp := client.DescribeDefaultEncryptionConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotsitewise-2019-12-02/DescribeDefaultEncryptionConfiguration +func (c *IoTSiteWise) DescribeDefaultEncryptionConfigurationRequest(input *DescribeDefaultEncryptionConfigurationInput) (req *request.Request, output *DescribeDefaultEncryptionConfigurationOutput) { + op := &request.Operation{ + Name: opDescribeDefaultEncryptionConfiguration, + HTTPMethod: "GET", + HTTPPath: "/configuration/account/encryption", + } + + if input == nil { + input = &DescribeDefaultEncryptionConfigurationInput{} + } + + output = &DescribeDefaultEncryptionConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeDefaultEncryptionConfiguration API operation for AWS IoT SiteWise. +// +// Retrieves information about the default encryption configuration for the +// AWS account in the default or specified region. For more information, see +// Key management (https://docs.aws.amazon.com/iot-sitewise/latest/userguide/key-management.html) +// in the AWS IoT SiteWise User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT SiteWise's +// API operation DescribeDefaultEncryptionConfiguration for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// The request isn't valid. This can occur if your request contains malformed +// JSON or unsupported characters. Check your request and try again. +// +// * InternalFailureException +// AWS IoT SiteWise can't process your request right now. Try again later. +// +// * ThrottlingException +// Your request exceeded a rate limit. For example, you might have exceeded +// the number of AWS IoT SiteWise assets that can be created per second, the +// allowed number of messages per second, and so on. +// +// For more information, see Quotas (https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) +// in the AWS IoT SiteWise User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotsitewise-2019-12-02/DescribeDefaultEncryptionConfiguration +func (c *IoTSiteWise) DescribeDefaultEncryptionConfiguration(input *DescribeDefaultEncryptionConfigurationInput) (*DescribeDefaultEncryptionConfigurationOutput, error) { + req, out := c.DescribeDefaultEncryptionConfigurationRequest(input) + return out, req.Send() +} + +// DescribeDefaultEncryptionConfigurationWithContext is the same as DescribeDefaultEncryptionConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeDefaultEncryptionConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
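
As an illustration of the context-aware variant described above, the following hedged sketch calls DescribeDefaultEncryptionConfigurationWithContext with a timeout. Reading ConfigurationStatus off the output is an assumption that mirrors the ConfigurationStatus structure added later in this change:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iotsitewise"
)

func main() {
	svc := iotsitewise.New(session.Must(session.NewSession()))

	// Bound the request with a context so it is cancelled if it runs too long.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	out, err := svc.DescribeDefaultEncryptionConfigurationWithContext(ctx,
		&iotsitewise.DescribeDefaultEncryptionConfigurationInput{})
	if err != nil {
		fmt.Println("describe failed:", err)
		return
	}
	// ConfigurationStatus on the output is assumed from the service API.
	fmt.Println("encryption configuration state:", aws.StringValue(out.ConfigurationStatus.State))
}
```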
+func (c *IoTSiteWise) DescribeDefaultEncryptionConfigurationWithContext(ctx aws.Context, input *DescribeDefaultEncryptionConfigurationInput, opts ...request.Option) (*DescribeDefaultEncryptionConfigurationOutput, error) { + req, out := c.DescribeDefaultEncryptionConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeGateway = "DescribeGateway" // DescribeGatewayRequest generates a "aws/request.Request" representing the @@ -5019,6 +5113,111 @@ func (c *IoTSiteWise) ListTagsForResourceWithContext(ctx aws.Context, input *Lis return out, req.Send() } +const opPutDefaultEncryptionConfiguration = "PutDefaultEncryptionConfiguration" + +// PutDefaultEncryptionConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutDefaultEncryptionConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutDefaultEncryptionConfiguration for more information on using the PutDefaultEncryptionConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutDefaultEncryptionConfigurationRequest method. +// req, resp := client.PutDefaultEncryptionConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iotsitewise-2019-12-02/PutDefaultEncryptionConfiguration +func (c *IoTSiteWise) PutDefaultEncryptionConfigurationRequest(input *PutDefaultEncryptionConfigurationInput) (req *request.Request, output *PutDefaultEncryptionConfigurationOutput) { + op := &request.Operation{ + Name: opPutDefaultEncryptionConfiguration, + HTTPMethod: "POST", + HTTPPath: "/configuration/account/encryption", + } + + if input == nil { + input = &PutDefaultEncryptionConfigurationInput{} + } + + output = &PutDefaultEncryptionConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutDefaultEncryptionConfiguration API operation for AWS IoT SiteWise. +// +// Sets the default encryption configuration for the AWS account. For more information, +// see Key management (https://docs.aws.amazon.com/iot-sitewise/latest/userguide/key-management.html) +// in the AWS IoT SiteWise User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT SiteWise's +// API operation PutDefaultEncryptionConfiguration for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// The request isn't valid. This can occur if your request contains malformed +// JSON or unsupported characters. Check your request and try again. +// +// * InternalFailureException +// AWS IoT SiteWise can't process your request right now. Try again later. +// +// * ThrottlingException +// Your request exceeded a rate limit. 
+//   the number of AWS IoT SiteWise assets that can be created per second, the
+//   allowed number of messages per second, and so on.
+//
+//   For more information, see Quotas (https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html)
+//   in the AWS IoT SiteWise User Guide.
+//
+//   * LimitExceededException
+//   You've reached the limit for a resource. For example, this can occur if you're
+//   trying to associate more than the allowed number of child assets or attempting
+//   to create more than the allowed number of properties for an asset model.
+//
+//   For more information, see Quotas (https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html)
+//   in the AWS IoT SiteWise User Guide.
+//
+//   * ConflictingOperationException
+//   Your request has conflicting operations. This can occur if you're trying
+//   to perform more than one operation on the same resource at the same time.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/iotsitewise-2019-12-02/PutDefaultEncryptionConfiguration
+func (c *IoTSiteWise) PutDefaultEncryptionConfiguration(input *PutDefaultEncryptionConfigurationInput) (*PutDefaultEncryptionConfigurationOutput, error) {
+	req, out := c.PutDefaultEncryptionConfigurationRequest(input)
+	return out, req.Send()
+}
+
+// PutDefaultEncryptionConfigurationWithContext is the same as PutDefaultEncryptionConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutDefaultEncryptionConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *IoTSiteWise) PutDefaultEncryptionConfigurationWithContext(ctx aws.Context, input *PutDefaultEncryptionConfigurationInput, opts ...request.Option) (*PutDefaultEncryptionConfigurationOutput, error) {
+	req, out := c.PutDefaultEncryptionConfigurationRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
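+// As a hypothetical end-to-end sketch (not generated code): the session
+// helpers are the SDK's standard ones, while the EncryptionType value and the
+// output's ConfigurationStatus field are assumed from the service model,
+// which this diff does not show in full.
+//
+//    svc := iotsitewise.New(session.Must(session.NewSession()))
+//
+//    out, err := svc.PutDefaultEncryptionConfigurationWithContext(
+//        aws.BackgroundContext(),
+//        &iotsitewise.PutDefaultEncryptionConfigurationInput{
+//            // Enum value assumed from the service model.
+//            EncryptionType: aws.String("SITEWISE_DEFAULT_ENCRYPTION"),
+//        },
+//    )
+//    if err == nil {
+//        // ConfigurationStatus (defined below) carries State and, on
+//        // failure, an Error with Code and Message.
+//        fmt.Println(aws.StringValue(out.ConfigurationStatus.State))
+//    }
+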
 const opPutLoggingOptions = "PutLoggingOptions"
 
 // PutLoggingOptionsRequest generates a "aws/request.Request" representing the
@@ -7902,6 +8101,69 @@ func (s *BatchPutAssetPropertyValueOutput) SetErrorEntries(v []*BatchPutAssetPro
 	return s
 }
 
+type ConfigurationErrorDetails struct {
+	_ struct{} `type:"structure"`
+
+	// Code is a required field
+	Code *string `locationName:"code" type:"string" required:"true" enum:"ErrorCode"`
+
+	// Message is a required field
+	Message *string `locationName:"message" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ConfigurationErrorDetails) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ConfigurationErrorDetails) GoString() string {
+	return s.String()
+}
+
+// SetCode sets the Code field's value.
+func (s *ConfigurationErrorDetails) SetCode(v string) *ConfigurationErrorDetails {
+	s.Code = &v
+	return s
+}
+
+// SetMessage sets the Message field's value.
+func (s *ConfigurationErrorDetails) SetMessage(v string) *ConfigurationErrorDetails {
+	s.Message = &v
+	return s
+}
+
+type ConfigurationStatus struct {
+	_ struct{} `type:"structure"`
+
+	Error *ConfigurationErrorDetails `locationName:"error" type:"structure"`
+
+	// State is a required field
+	State *string `locationName:"state" type:"string" required:"true" enum:"ConfigurationState"`
+}
+
+// String returns the string representation
+func (s ConfigurationStatus) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ConfigurationStatus) GoString() string {
+	return s.String()
+}
+
+// SetError sets the Error field's value.
+func (s *ConfigurationStatus) SetError(v *ConfigurationErrorDetails) *ConfigurationStatus {
+	s.Error = v
+	return s
+}
+
+// SetState sets the State field's value.
+func (s *ConfigurationStatus) SetState(v string) *ConfigurationStatus {
+	s.State = &v
+	return s
+}
+
 // Your request has conflicting operations. This can occur if you're trying
 // to perform more than one operation on the same resource at the same time.
 type ConflictingOperationException struct {
@@ -8951,7 +9213,7 @@ type CreatePresignedPortalUrlInput struct {
 	// The duration (in seconds) for which the session at the URL is valid.
 	//
-	// Default: 900 seconds (15 minutes)
+	// Default: 43,200 seconds (12 hours)
 	SessionDurationSeconds *int64 `location:"querystring" locationName:"sessionDurationSeconds" min:"900" type:"integer"`
 }
@@ -8999,10 +9261,10 @@ func (s *CreatePresignedPortalUrlInput) SetSessionDurationSeconds(v int64) *Crea
 
 type CreatePresignedPortalUrlOutput struct {
 	_ struct{} `type:"structure"`
 
-	// The pre-signed URL to the portal. The URL contains the portal ID and a session
+	// The pre-signed URL to the portal. The URL contains the portal ID and an authentication
 	// token that lets you access the portal. The URL has the following format.
 	//
-	// https://